# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    """Dataset version identifier `MAJOR.MINOR.PATCH`."""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the (major, minor, patch) tuple extracted from a version string."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Return the version string built from a (major, minor, patch) tuple."""
    return ".".join(str(v) for v in version_tuple)
import datasets
from .evaluate import evaluate
snake_case__ : int = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
snake_case__ : Union[str, Any] = """
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
snake_case__ : Any = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
    'exact_match': Exact match (the normalized answer exactly matches the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    """CUAD metric, wrapping the official CUAD v1 evaluation script."""

    def _info(self) -> datasets.MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': {
'id': datasets.Value('string' ),
'prediction_text': datasets.features.Sequence(datasets.Value('string' ) ),
},
'references': {
'id': datasets.Value('string' ),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
},
} ) , codebase_urls=['https://www.atticusprojectai.org/cuad'] , reference_urls=['https://www.atticusprojectai.org/cuad'] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_efficientformer': [
'EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientFormerConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
'EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientFormerForImageClassification',
'EfficientFormerForImageClassificationWithTeacher',
'EfficientFormerModel',
'EfficientFormerPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
'TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFEfficientFormerForImageClassification',
'TFEfficientFormerForImageClassificationWithTeacher',
'TFEfficientFormerModel',
'TFEfficientFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
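

# A self-contained sketch (illustration only; `LazyDemoModule` is hypothetical,
# not the real `_LazyModule`) of the deferral pattern used above: the module
# advertises an import structure up front, and the first attribute access
# performs the real import and caches the result.
import importlib
import types


class LazyDemoModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the module that actually defines it,
        # mirroring the `_import_structure` dict built above
        self._symbol_to_module = {sym: mod for mod, syms in import_structure.items() for sym in syms}

    def __getattr__(self, name):
        if name not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        value = getattr(importlib.import_module(self._symbol_to_module[name]), name)
        setattr(self, name, value)  # cache: subsequent lookups skip __getattr__
        return value


if __name__ == "__main__":
    # Usage: nothing is imported until `lazy_math.sqrt` is first touched.
    lazy_math = LazyDemoModule("lazy_math", {"math": ["sqrt", "pi"]})
    assert lazy_math.sqrt(9.0) == 3.0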
"""simple docstring"""
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Return the leftmost index at which `item` can be inserted while keeping the collection sorted."""
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Return the rightmost index at which `item` can be inserted while keeping the collection sorted."""
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert `item` before any existing equal entries."""
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert `item` after any existing equal entries."""
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    """Iterative binary search; returns the index of `item` or None if absent."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    """Binary search built on the standard library's `bisect.bisect_left`."""
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    """Recursive binary search over `sorted_collection[left:right + 1]`."""
    if right < left:
        return None

    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
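

# A few quick checks (added for illustration) contrasting the variants above:
# `bisect_left` returns the first index whose value is >= item, `bisect_right`
# the first index whose value is > item.
def _demo_bisect() -> None:
    data = [0, 5, 7, 10, 15, 15, 15, 20]
    assert bisect_left(data, 15) == 4  # before the run of equal items
    assert bisect_right(data, 15) == 7  # after the run of equal items
    assert binary_search(data, 10) == 3
    assert binary_search(data, 6) is None
    assert binary_search_by_recursion(data, 20, 0, len(data) - 1) == 7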
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
if result is None:
print(F'''{target} was not found in {collection}.''')
else:
print(F'''{target} was found at position {result} in {collection}.''')
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = """\
@INPROCEEDINGS{Papineni02bleu:a,
    author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
    title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
    booktitle = {},
    year = {2002},
    pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
    title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
    author = "Lin, Chin-Yew and
      Och, Franz Josef",
    booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",
    month = "aug 23{--}aug 27",
    year = "2004",
    address = "Geneva, Switzerland",
    publisher = "COLING",
    url = "https://www.aclweb.org/anthology/C04-1072",
    pages = "501--507",
}
"""

_DESCRIPTION = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: "the closer a machine translation is to a professional human translation,
the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.

Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].

BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""

_KWARGS_DESCRIPTION = """
Computes BLEU score of translated segments against one or more references.
Args:
    predictions: list of translations to score.
        Each translation should be tokenized into a list of tokens.
    references: list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
    max_order: Maximum n-gram order to use when computing BLEU score.
    smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
    'bleu': bleu score,
    'precisions': geometric mean of n-gram precisions,
    'brevity_penalty': brevity penalty,
    'length_ratio': ratio of lengths,
    'translation_length': translation_length,
    'reference_length': reference_length
Examples:

    >>> predictions = [
    ...     ["hello", "there", "general", "kenobi"],  # tokenized prediction of the first sample
    ...     ["foo", "bar", "foobar"]  # tokenized prediction of the second sample
    ... ]
    >>> references = [
    ...     [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]],  # tokenized references for the first sample (2 references)
    ...     [["foo", "bar", "foobar"]]  # tokenized references for the second sample (1 reference)
    ... ]
    >>> bleu = datasets.load_metric("bleu")
    >>> results = bleu.compute(predictions=predictions, references=references)
    >>> print(results["bleu"])
    1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
    def _info(self) -> datasets.MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
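

# A back-of-the-envelope check (illustrative; not part of the original file) of
# what `compute_bleu` returns for a perfect match: every n-gram precision is 1.0
# and the brevity penalty is 1.0, so BLEU is 1.0, matching the docstring example.
def _bleu_sanity_check() -> None:
    prediction = [["hello", "there", "general", "kenobi"]]
    references = [[["hello", "there", "general", "kenobi"]]]
    bleu, precisions, bp, ratio, translation_length, reference_length = compute_bleu(
        reference_corpus=references, translation_corpus=prediction, max_order=4, smooth=False
    )
    assert bleu == 1.0 and bp == 1.0 and all(p == 1.0 for p in precisions)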
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        """Constructs LongformerConfig."""
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export


class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support the tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
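

# A small illustration (added; not part of the original module) of the
# global-attention pattern `generate_dummy_inputs` builds above: zeros
# everywhere, then every second token marked global via the `[:, ::2]` stride.
def _show_global_attention_pattern() -> None:
    import torch

    input_ids = torch.ones(2, 8, dtype=torch.long)
    global_attention_mask = torch.zeros_like(input_ids)
    global_attention_mask[:, ::2] = 1
    print(global_attention_mask[0].tolist())  # -> [1, 0, 1, 0, 1, 0, 1, 0]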
'''Checks that code blocks marked `# Copied from diffusers....` stay in sync with their source (see `make fix-copies`).'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
A_ : Union[str, Any] = "src/diffusers"
A_ : Union[str, Any] = "."
# This is to make sure the diffusers module imported is the one in the repo.
A_ : Union[str, Any] = importlib.util.spec_from_file_location(
"diffusers",
os.path.join(DIFFUSERS_PATH, "__init__.py"),
submodule_search_locations=[DIFFUSERS_PATH],
)
A_ : str = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None


def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)


_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")


def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """Apply the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences, or overwrite the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies.
        theoretical_code_lines = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code_lines)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line.
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
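

# For reference (illustrative, not part of the original script), this is the
# comment grammar the regexes above recognise. `_re_copy_warning` captures the
# indent, the fully qualified source object, and an optional replacement clause
# of the form `with old->new [all-casing]` that `is_copy_consistent` applies
# before diffing:
#
#     # Copied from diffusers.models.attention.BasicTransformerBlock.forward
#     # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler with DDPM->DDIM all-casing
#
# A checked block ends when its indentation drops or at an explicit `# End copy` line.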
'''Decorator utilities for flagging experimental callables.'''
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable):
    """Decorator that emits a warning whenever the decorated callable is used."""

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            (f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future."),
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
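

# A minimal usage sketch (not part of the original module): decorating a
# function makes every call emit a UserWarning while still returning the
# normal result.
if __name__ == "__main__":

    @experimental
    def square(x: int) -> int:
        return x * x

    print(square(4))  # prints 16, after warning that 'square' is experimental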
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factors of `n` in ascending order, with multiplicity."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
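

# Doctest-style examples (added for illustration; the original file defined
# none), picked up by the `doctest.testmod()` call below.
def _prime_factors_examples() -> None:
    """
    >>> prime_factors(360)
    [2, 2, 2, 3, 3, 5]
    >>> prime_factors(97)
    [97]
    >>> prime_factors(1)
    []
    """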
if __name__ == "__main__":
import doctest
doctest.testmod()
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class StableDiffusionPipelineOutput(BaseOutput):
    """
    Output class for Stable Diffusion pipelines.

    Args:
        images (`List[PIL.Image.Image]` or `np.ndarray`):
            List of denoised PIL images, or a numpy array of shape `(batch_size, height, width, num_channels)`.
        nsfw_content_detected (`List[bool]`):
            Flags denoting whether the corresponding image was flagged by the safety checker, or `None` if safety
            checking could not be performed.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipeline_cycle_diffusion import CycleDiffusionPipeline
    from .pipeline_stable_diffusion import StableDiffusionPipeline
    from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
    from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
    from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
    from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
    from .pipeline_stable_diffusion_instruct_pix2pix import StableDiffusionInstructPix2PixPipeline
    from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
    from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline
    from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
    from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
    from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
    from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
    from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
    from .pipeline_stable_unclip import StableUnCLIPPipeline
    from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline
    from .safety_checker import StableDiffusionSafetyChecker
    from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.26.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionPix2PixZeroPipeline,
    )
else:
    from .pipeline_stable_diffusion_depth2img import StableDiffusionDepth2ImgPipeline
    from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
    from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version('>=', '0.0.12')
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
    from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
    from .pipeline_onnx_stable_diffusion_img2img import OnnxStableDiffusionImg2ImgPipeline
    from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
    from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
    from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput):
        """
        Output class for Flax-based Stable Diffusion pipelines.
        """

        images: np.ndarray
        nsfw_content_detected: List[bool]

    from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
    from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
    from .pipeline_flax_stable_diffusion_img2img import FlaxStableDiffusionImg2ImgPipeline
    from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
    from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
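

# A condensed, self-contained sketch (illustration only; all names here are
# hypothetical) of the optional-dependency guard repeated throughout this file:
# probe for the extra at import time and fall back to a dummy object that fails
# only when someone actually tries to use it.
class _OptionalDependencyNotAvailableDemo(BaseException):
    pass


def _resolve_pipeline(extra_installed: bool):
    try:
        if not extra_installed:
            raise _OptionalDependencyNotAvailableDemo()
    except _OptionalDependencyNotAvailableDemo:

        class MissingPipeline:  # fails lazily, mirroring the dummy_*_objects modules
            def __init__(self, *args, **kwargs):
                raise ImportError("Install the optional dependencies to use this pipeline.")

        return MissingPipeline
    else:

        class RealPipeline:  # stands in for the real import done in the `else:` branch
            pass

        return RealPipeline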
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """Exact Gaussian Error Linear Unit, as originally implemented in the Google BERT repo."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """Smoother tanh-based approximation of gelu."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """Gelu with output clipped to the range [-10, 10]."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: split the input in two halves and gate one with the sigmoid of the other."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
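

# Note (added for context, not from the original test): the cache-consistency
# check above follows a standard recipe. Run the decoder once with
# `use_cache=True`, feed only the newly sampled token plus `past_key_values` on
# the second pass, and assert that the resulting hidden state matches a full
# forward pass over the concatenated sequence within a small tolerance
# (atol=1e-3 above).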
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizer(PreTrainedTokenizer):
    """CamemBERT tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}

        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs by adding `<s>`/`</s>` special tokens around one or two sequences."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (sub-word strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
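

# An illustrative sketch (not from the original file) of the fairseq /
# sentencepiece id alignment implemented above: the four fairseq specials occupy
# ids 0-3, every sentencepiece piece is shifted up by `fairseq_offset`, and
# `<mask>` is appended after the shifted vocabulary.
def _show_id_layout(sp_vocab_size: int = 32000) -> None:
    fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
    fairseq_offset = len(fairseq_tokens_to_ids)  # 4
    mask_id = sp_vocab_size + fairseq_offset  # <mask> goes last
    print(f"sentencepiece id 0 -> model id {fairseq_offset}")
    print(f"<mask> -> model id {mask_id}")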
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"

    @cached_property
    def tokenizer(self):
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
'''Helpers for loading and converting model weights with `bitsandbytes` quantization.'''
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
a : int = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    """
    A helper function to set a given tensor (parameter or buffer) of a module on a specific device. Note that doing
    `param.to(device)` creates a new tensor not linked to the parameter, which is why this function is needed.
    """
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False):
    """Private recursive helper for `replace_with_bnb_linear`; returns the model and a replacement flag."""
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    """
    Replace all `torch.nn.Linear` modules by `bnb.nn.Linear8bitLt` or `bnb.nn.Linear4bit` modules, depending on the
    quantization config, so that the model can run in mixed int8/4bit precision.
    """
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model
def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    """
    A utility to find the module keys to keep in full precision, e.g. the `lm_head` of a causal LM, which is usually
    kept unquantized for numerical stability.
    """
    # Create a copy of the model and tie the weights, then check if it contains tied weights
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
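
# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the module above): a tiny
# stand-in config exposing just the attributes `_replace_with_bnb_linear` reads.
# `_DemoQuantConfig` is hypothetical; real callers pass `transformers.BitsAndBytesConfig`.
if __name__ == "__main__" and is_bitsandbytes_available() and is_accelerate_available():

    class _DemoQuantConfig:
        llm_int8_has_fp16_weight = False
        llm_int8_threshold = 6.0
        llm_int8_skip_modules = None

        def quantization_method(self):
            return "llm_int8"

    demo_model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 4))
    demo_model = replace_with_bnb_linear(demo_model, quantization_config=_DemoQuantConfig())
    print(demo_model)  # both nn.Linear layers are now bnb.nn.Linear8bitLt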
| 703
|
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test `_convert_token_to_id` and `_convert_id_to_token`."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1006)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "",
"i",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
] , )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["▁he", "ll", "o"])

    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
] , )
    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase : Dict = {"input_ids": [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase, model_name="xlnet-base-cased", revision="c841166438c31ec7ca9a106dee7bb312b73ae511"
        )
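
# Round-trip sketch (illustrative, not part of the test suite above): the same
# conversions the tests exercise, driven directly. Assumes network access to
# download "xlnet-base-cased".
#
#     tok = XLNetTokenizer.from_pretrained("xlnet-base-cased")
#     ids = tok.encode("sequence builders", add_special_tokens=True)
#     assert tok.decode(ids, skip_special_tokens=True).startswith("sequence builders")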
| 609
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
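
# How the pattern behaves at runtime (sketch, assuming transformers is installed):
# the module object is swapped for a `_LazyModule`, so importing the package is
# cheap and the heavy torch-backed submodule is only imported on first attribute
# access.
#
#     import importlib
#     graphormer = importlib.import_module("transformers.models.graphormer")
#     model_cls = graphormer.GraphormerModel  # real import happens here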
| 259
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )
        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
| 259
| 1
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
    r"""Constructs a CLAP feature extractor (log-mel spectrogram frontend)."""

    model_input_names = ["input_features", "is_longer"]

    def __init__(self, feature_size=64, sampling_rate=48_000, hop_length=480, max_length_s=10, fft_window_size=1024, padding_value=0.0, return_attention_mask=False, frequency_min: float = 0, frequency_max: float = 14_000, top_db: int = None, truncation: str = "fusion", padding: str = "repeatpad", **kwargs):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm=None, mel_scale="htk"
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney"
        )
    def to_dict(self) -> Dict[str, Any]:
        """Serializes this instance to a dict; the mel filter banks are recomputed on load, so they are dropped."""
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output
    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        """Compute the log-mel spectrogram of the provided waveform using a Hann window."""
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        """Stack a shrunk copy of the full mel with three randomly located chunks (front/middle/back)."""
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        """Truncate or pad the waveform, then extract the (possibly fused) log-mel features."""
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer
    def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], truncation: str = None, padding: Optional[str] = None, max_length: Optional[int] = None, sampling_rate: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        """Featurize one or several waveform(s) into a `BatchFeature` of log-mel inputs."""
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float32):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], list):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
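
# Minimal usage sketch (illustrative): featurize one second of a synthetic sine
# wave; 48 kHz matches the extractor's default `sampling_rate`.
#
#     import numpy as np
#     fe = ClapFeatureExtractor()
#     wave = np.sin(2 * np.pi * 440 * np.arange(48_000) / 48_000)
#     feats = fe(wave, sampling_rate=48_000, return_tensors="np")
#     # with the default "fusion" truncation, the shape is (1, 4, frames, 64)
#     print(feats["input_features"].shape)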
| 717
|
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    """Construct an MGP-STR character-level tokenizer backed by a simple `vocab.json` mapping."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
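
# Quick usage sketch (illustrative): the tokenizer is character-level, so
# `_tokenize` simply splits a string into its characters.
#
#     tok = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
#     print(tok._tokenize("hello"))                 # ['h', 'e', 'l', 'l', 'o']
#     print(tok.convert_tokens_to_ids(["h", "e"]))  # ids looked up in vocab.json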
| 15
| 0
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    r"""
    Constructs an AltCLIP processor which wraps a CLIP image processor and an XLM-Roberta tokenizer into a single
    processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
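
# Usage sketch (illustrative, assumes network access to download the checkpoint):
#
#     from PIL import Image
#     processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#     inputs = processor(
#         text=["a photo of a cat"], images=Image.new("RGB", (224, 224)), return_tensors="pt"
#     )
#     print(sorted(inputs.keys()))  # attention_mask, input_ids, pixel_values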
| 570
|
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """Sort worker: holds one value and swaps it with its left/right neighbors via pipes."""
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
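
# Quick sanity sketch (illustrative): the parallel sort should agree with the
# built-in `sorted` for a 10-element input (the swap loop above is hard-coded
# to 10 rounds, so it fully sorts lists of length <= 10).
#
#     import random
#     data = random.sample(range(100), 10)
#     assert odd_even_transposition(list(data)) == sorted(data)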
| 570
| 1
|
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")
class AcceleratedScheduler:
    """
    A wrapper around a learning rate scheduler that only steps when the optimizer(s) actually took a training step,
    e.g. to avoid stepping the scheduler when gradients overflowed in mixed-precision training.
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
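
# Minimal usage sketch (illustrative): wrapping a vanilla PyTorch scheduler by
# hand. In real code `Accelerator.prepare` constructs this wrapper for you.
if __name__ == "__main__":
    import torch

    model = torch.nn.Linear(2, 2)
    opt = torch.optim.SGD(model.parameters(), lr=0.1)
    sched = torch.optim.lr_scheduler.StepLR(opt, step_size=1)
    # step_with_optimizer=False: no optimizer/scheduler linkage, plain stepping
    wrapped = AcceleratedScheduler(sched, opt, step_with_optimizer=False)
    opt.step()
    wrapped.step()
    print(wrapped.get_last_lr())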
| 232
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
"tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
"ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"ElectraForCausalLM",
"ElectraForMaskedLM",
"ElectraForMultipleChoice",
"ElectraForPreTraining",
"ElectraForQuestionAnswering",
"ElectraForSequenceClassification",
"ElectraForTokenClassification",
"ElectraModel",
"ElectraPreTrainedModel",
"load_tf_weights_in_electra",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
"TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFElectraForMaskedLM",
"TFElectraForMultipleChoice",
"TFElectraForPreTraining",
"TFElectraForQuestionAnswering",
"TFElectraForSequenceClassification",
"TFElectraForTokenClassification",
"TFElectraModel",
"TFElectraPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
"FlaxElectraForCausalLM",
"FlaxElectraForMaskedLM",
"FlaxElectraForMultipleChoice",
"FlaxElectraForPreTraining",
"FlaxElectraForQuestionAnswering",
"FlaxElectraForSequenceClassification",
"FlaxElectraForTokenClassification",
"FlaxElectraModel",
"FlaxElectraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 232
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])

        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 224, 224])

        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])
        images = np.ones([3, 224, 224])

        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
| 233
|
"""simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"]
        )
        train_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"]
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"]
        )
        val_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"]
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
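
    # Why the -100 replacement in `_map_to_encoder_decoder_inputs` matters (sketch):
    # tokens set to -100 are ignored by PyTorch's CrossEntropyLoss, so padded
    # positions do not contribute to the loss.
    #
    #     import torch
    #     loss_fct = torch.nn.CrossEntropyLoss()  # ignore_index defaults to -100
    #     logits = torch.randn(3, 10)
    #     labels = torch.tensor([1, -100, 4])     # middle position is masked out
    #     print(loss_fct(logits, labels))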
| 277
| 0
|
def manhattan_distance(point_a: list, point_b: list) -> float:
    """Expects two lists of numbers representing two points in the same n-dimensional space."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
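
# Usage sketch:
#
#     print(manhattan_distance([1, 1], [2, 2]))            # 2.0
#     print(manhattan_distance_one_liner([1, 4], [5, 1]))  # 7.0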
| 313
|
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = '''\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
author = "Lin, Chin-Yew and
Och, Franz Josef",
booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",
month = "aug 23{--}aug 27",
year = "2004",
address = "Geneva, Switzerland",
publisher = "COLING",
url = "https://www.aclweb.org/anthology/C04-1072",
pages = "501--507",
}
'''
_DESCRIPTION = '''\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,
the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
'''
_KWARGS_DESCRIPTION = '''
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
\'bleu\': bleu score,
\'precisions\': geometric mean of n-gram precisions,
\'brevity_penalty\': brevity penalty,
\'length_ratio\': ratio of lengths,
\'translation_length\': translation_length,
\'reference_length\': reference_length
Examples:
>>> predictions = [
... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample
... ["foo", "bar", "foobar"] # tokenized prediction of the second sample
... ]
>>> references = [
... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)
... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric("bleu")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results["bleu"])
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
| 313
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
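With `_import_structure` restored, the `_LazyModule` pattern defers the heavy framework imports until a symbol is first accessed. A minimal usage sketch (standard transformers API; the checkpoint name is just a common public one):

# `import transformers` stays cheap; torch is only imported once CLIPModel is touched.
from transformers import CLIPModel

model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")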
| 443
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
"""FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FalconForCausalLM""",
"""FalconModel""",
"""FalconPreTrainedModel""",
"""FalconForSequenceClassification""",
"""FalconForTokenClassification""",
"""FalconForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 443
| 1
|
'''simple docstring'''
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
"tab": ord("\t"),
"newline": ord("\r"),
"esc": 27,
"up": 65 + ARROW_KEY_FLAG,
"down": 66 + ARROW_KEY_FLAG,
"right": 67 + ARROW_KEY_FLAG,
"left": 68 + ARROW_KEY_FLAG,
"mod_int": 91,
"undefined": sys.maxsize,
"interrupt": 3,
"insert": 50,
"delete": 51,
"pg_up": 53,
"pg_down": 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
B"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
B"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
B"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
B"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
B"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
B"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
B"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
B"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Get raw characters from keyboard input."""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Get a character from the keyboard and return the key code."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
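A hypothetical read-loop sketch (not part of the original module) showing how the two helpers above compose with KEYMAP and ARROW_KEY_FLAG:

while True:
    key = get_character()  # printable char, arrow chr(code + ARROW_KEY_FLAG), or KEYMAP["undefined"]
    if isinstance(key, str) and ord(key) == KEYMAP["interrupt"]:  # Ctrl-C
        break
    if isinstance(key, str) and ord(key) & ARROW_KEY_FLAG:
        print("arrow:", chr(ord(key) - ARROW_KEY_FLAG))  # 'A'..'D' for up/down/right/left
    else:
        print("key:", key)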
| 705
|
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setup(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
| 539
| 0
|
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")


class LRUCache(Generic[T]):
    """Least-recently-used cache backed by a deque (order) and a set (membership)."""

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache: LRUCache[str | int] = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 52
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.upsample.0""": """encoder.upsample.projection""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group"
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
        config.activation_dropout = fs_config.activation_dropout
        config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
        config.attention_dropout = fs_config.attention_dropout
        config.feat_proj_dropout = fs_config.dropout_input
        config.hidden_dropout = fs_config.dropout
        config.mask_feature_length = fs_config.mask_channel_length
        config.mask_feature_prob = fs_config.mask_channel_prob
        config.mask_time_length = fs_config.mask_length
        config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy/paste/tweak a fairseq SEW checkpoint's weights into the transformers design."""
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)

    feature_extractor.save_pretrained(pytorch_dump_folder_path)
    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
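A hypothetical command-line invocation for the converter above (script name and all paths are placeholders):

# python convert_sew_checkpoint.py \
#     --checkpoint_path /path/to/sew_checkpoint.pt \
#     --pytorch_dump_folder_path ./sew-converted \
#     --dict_path /path/to/dict.ltr.txt \
#     --is_finetuned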
| 472
| 0
|
"""simple docstring"""
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Copy/paste/tweak an s3prl checkpoint's downstream weights into the transformers design."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 720
|
"""simple docstring"""
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.vertices_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if sources is int:
            sources = [sources]
        if sinks is int:
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # You should override it
    def _algorithm(self):
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]

        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])
    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
    maximum_flow = flow_network.find_maximum_flow()
print(F'''maximum flow is {maximum_flow}''')
| 560
| 0
|
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    backend_name = None
@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Apply ``function`` to the elements of ``iterable`` in parallel, via multiprocessing or joblib."""
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)
def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")
    return mapped
def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    # progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib,
    # and it requires monkey-patching joblib internal classes which is subject to change
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )
@experimental
@contextlib.contextmanager
def parallel_backend(backend_name):
    """Configure the parallel backend (e.g. "spark") for the duration of the context."""
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
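A minimal usage sketch (hypothetical pairing; the context manager above routes any `num_proc` work through joblib, here with the joblib-spark backend):

from datasets import load_dataset

with parallel_backend("spark"):  # requires the joblibspark package
    ds = load_dataset("imdb", num_proc=4)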
| 183
|
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)
def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        # 1. Check inputs. Raise error if not correct
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
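A hypothetical construction sketch (checkpoint name and `init_image` are placeholders; the pipeline only needs a UNet plus any scheduler whose config converts to DDIM):

from diffusers import DDIMScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ddpm-celebahq-256", subfolder="unet")
scheduler = DDIMScheduler.from_pretrained("google/ddpm-celebahq-256", subfolder="scheduler")
pipe = DDIMNoiseComparativeAnalysisPipeline(unet=unet, scheduler=scheduler)
images, noise_timestep = pipe(init_image, strength=0.5, num_inference_steps=50, return_dict=False)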
| 183
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_convnext""": ["""CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvNextConfig""", """ConvNextOnnxConfig"""]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
"""CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvNextForImageClassification""",
"""ConvNextModel""",
"""ConvNextPreTrainedModel""",
"""ConvNextBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
"""TFConvNextForImageClassification""",
"""TFConvNextModel""",
"""TFConvNextPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 689
|
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align PATH_TO_DIFFUSERS in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 689
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlm-roberta-base": 512,
    "xlm-roberta-large": 512,
    "xlm-roberta-large-finetuned-conll02-dutch": 512,
    "xlm-roberta-large-finetuned-conll02-spanish": 512,
    "xlm-roberta-large-finetuned-conll03-english": 512,
    "xlm-roberta-large-finetuned-conll03-german": 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs=None, **kwargs) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
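A minimal usage sketch (loading the public checkpoint by name; the exact subword split is approximate):

from transformers import XLMRobertaTokenizer

tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
ids = tok("Hello world")["input_ids"]
print(tok.convert_ids_to_tokens(ids))  # e.g. ['<s>', '▁Hello', '▁world', '</s>']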
| 76
|
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = False, False, False
@dataclass
class Audio:
    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError('''To support encoding audio data, please install \'soundfile\'.''' ) from err
if isinstance(__lowerCamelCase , __lowerCamelCase ):
return {"bytes": None, "path": value}
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
UpperCAmelCase__ :int = BytesIO()
sf.write(__lowerCamelCase , value['''array'''] , value['''sampling_rate'''] , format='''wav''' )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('''pcm''' ):
# "PCM" only has raw audio bytes
if value.get('''sampling_rate''' ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''' )
if value.get('''bytes''' ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
UpperCAmelCase__ :List[Any] = np.frombuffer(value['''bytes'''] , dtype=np.intaa ).astype(np.floataa ) / 3_2_7_6_7
else:
UpperCAmelCase__ :Optional[Any] = np.memmap(value['''path'''] , dtype='''h''' , mode='''r''' ).astype(np.floataa ) / 3_2_7_6_7
UpperCAmelCase__ :Optional[Any] = BytesIO(bytes() )
sf.write(__lowerCamelCase , __lowerCamelCase , value['''sampling_rate'''] , format='''wav''' )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('''path''' )}
elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
else:
raise ValueError(
f'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCamelCase : dict , __lowerCamelCase : Optional[Dict[str, Union[str, bool, None]]] = None ):
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Audio(decode=True) instead.''' )
UpperCAmelCase__ , UpperCAmelCase__ :str = (value['''path'''], BytesIO(value['''bytes'''] )) if value['''bytes'''] is not None else (value['''path'''], None)
if path is None and file is None:
raise ValueError(f'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''' ) from err
UpperCAmelCase__ :List[str] = xsplitext(__lowerCamelCase )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
'''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
'''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' )
if file is None:
UpperCAmelCase__ :Optional[Any] = token_per_repo_id or {}
UpperCAmelCase__ :str = path.split('''::''' )[-1]
try:
UpperCAmelCase__ :Tuple = string_to_dict(__lowerCamelCase , config.HUB_DATASETS_URL )['''repo_id''']
UpperCAmelCase__ :str = token_per_repo_id[repo_id]
except (ValueError, KeyError):
UpperCAmelCase__ :Tuple = None
with xopen(__lowerCamelCase , '''rb''' , use_auth_token=__lowerCamelCase ) as f:
UpperCAmelCase__ , UpperCAmelCase__ :Union[str, Any] = sf.read(__lowerCamelCase )
else:
UpperCAmelCase__ , UpperCAmelCase__ :List[Any] = sf.read(__lowerCamelCase )
UpperCAmelCase__ :Optional[int] = array.T
if self.mono:
UpperCAmelCase__ :Any = librosa.to_mono(__lowerCamelCase )
if self.sampling_rate and self.sampling_rate != sampling_rate:
UpperCAmelCase__ :Union[str, Any] = librosa.resample(__lowerCamelCase , orig_sr=__lowerCamelCase , target_sr=self.sampling_rate )
UpperCAmelCase__ :List[str] = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
from .features import Value
if self.decode:
raise ValueError('''Cannot flatten a decoded Audio feature.''' )
return {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
def __SCREAMING_SNAKE_CASE ( self : Any , __lowerCamelCase : Union[pa.StringArray, pa.StructArray] ):
if pa.types.is_string(storage.type ):
UpperCAmelCase__ :List[str] = pa.array([None] * len(__lowerCamelCase ) , type=pa.binary() )
UpperCAmelCase__ :Tuple = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCAmelCase__ :str = pa.array([None] * len(__lowerCamelCase ) , type=pa.string() )
UpperCAmelCase__ :int = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('''array''' ):
UpperCAmelCase__ :Any = pa.array([Audio().encode_example(__lowerCamelCase ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('''bytes''' ) >= 0:
UpperCAmelCase__ :str = storage.field('''bytes''' )
else:
UpperCAmelCase__ :List[str] = pa.array([None] * len(__lowerCamelCase ) , type=pa.binary() )
if storage.type.get_field_index('''path''' ) >= 0:
UpperCAmelCase__ :Optional[int] = storage.field('''path''' )
else:
UpperCAmelCase__ :Optional[int] = pa.array([None] * len(__lowerCamelCase ) , type=pa.string() )
UpperCAmelCase__ :List[str] = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
return array_cast(__lowerCamelCase , self.pa_type )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCamelCase : pa.StructArray ):
@no_op_if_value_is_null
def path_to_bytes(__lowerCamelCase : Dict ):
with xopen(__lowerCamelCase , '''rb''' ) as f:
UpperCAmelCase__ :Any = f.read()
return bytes_
UpperCAmelCase__ :Union[str, Any] = pa.array(
[
(path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
UpperCAmelCase__ :Optional[int] = pa.array(
[os.path.basename(__lowerCamelCase ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , )
UpperCAmelCase__ :Optional[int] = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
return array_cast(__lowerCamelCase , self.pa_type )
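# A minimal usage sketch for the Audio feature above, assuming the surrounding
# `datasets` package (the file path is illustrative):
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"audio": ["/path/to/sample.wav"]})
#   ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
#   sample = ds[0]["audio"]  # decode_example reads the file, mixes to mono and resamples
#   sample["array"], sample["sampling_rate"]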
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")

    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_single_bytes(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")

    # Overridden common tests that do not apply to Perceiver's byte-level vocabulary:
    def test_pretrained_model_lists(self):
        pass

    def test_get_vocab(self):
        pass

    def test_pretokenized_inputs(self):
        pass

    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                output = tokenizer.convert_tokens_to_string(string)
                self.assertIsInstance(output, str)
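# These tests can be run with pytest (the path is illustrative of the
# transformers repository layout):
#
#   pytest tests/models/perceiver/test_tokenization_perceiver.py -q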
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
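# A minimal usage sketch (values are the defaults shown above):
#
#   config = DonutSwinConfig(embed_dim=96, depths=[2, 2, 6, 2])
#   assert config.hidden_size == 96 * 2 ** 3  # 768, the channel dim after the last stage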
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from fastapi import Body, FastAPI, HTTPException
    from fastapi.routing import APIRoute
    from pydantic import BaseModel
    from starlette.responses import JSONResponse
    from uvicorn import run

    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False

logger = logging.get_logger("transformers-cli/serving")


def serve_command_factory(args: Namespace):
    """Factory function used to instantiate the serving server from the provided command line arguments."""
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)


class ServeModelInfoResult(BaseModel):
    """Expose model information."""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenize result model."""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model."""

    text: str


class ServeForwardResult(BaseModel):
    """Forward result model."""

    output: Any


class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task",
            type=str,
            choices=get_supported_tasks(),
            help="The task to run the pipeline on",
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)

    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline

        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]".'
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/",
                        self.model_info,
                        response_model=ServeModelInfoResult,
                        response_class=JSONResponse,
                        methods=["GET"],
                    ),
                    APIRoute(
                        "/tokenize",
                        self.tokenize,
                        response_model=ServeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/detokenize",
                        self.detokenize,
                        response_model=ServeDeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/forward",
                        self.forward,
                        response_model=ServeForwardResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                ],
                timeout=600,
            )

    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
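# A minimal invocation sketch, assuming the `transformers-cli` entry point wires
# `register_subcommand` into its root parser (payloads are illustrative):
#
#   transformers-cli serve --task sentiment-analysis --host 0.0.0.0 --port 8888
#   curl -X POST localhost:8888/tokenize \
#        -H "Content-Type: application/json" \
#        -d '{"text_input": "hello world", "return_ids": true}'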
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None

# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError

# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"

_logger = None


def logger():
    """Returns the logger instance used in this module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        #: The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy(object):
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        #   https://github.com/benediktschmitt/py-filelock/issues/31
        #   https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
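# A minimal usage sketch for the platform-appropriate lock class selected above
# (the lock file name is illustrative):
#
#   lock = FileLock("resource.txt.lock", timeout=5)
#   with lock:  # __enter__ -> acquire(); raises Timeout after 5 seconds
#       ...     # critical section; acquire() is re-entrant via the lock counter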
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}


class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
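# A minimal usage sketch (hypothetical values):
#
#   config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
#   onnx_config = MobileNetV1OnnxConfig(config)
#   onnx_config.inputs  # OrderedDict([("pixel_values", {0: "batch"})])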
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class __lowercase ( A, A, A, unittest.TestCase ):
'''simple docstring'''
_A : List[Any] = StableDiffusionControlNetImgaImgPipeline
_A : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
_A : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_A : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'''control_image'''} )
_A : str = IMAGE_TO_IMAGE_IMAGE_PARAMS
def A_ ( self : Optional[Any] ):
torch.manual_seed(0 )
UpperCamelCase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0 )
UpperCamelCase__ = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
UpperCamelCase__ = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_a , set_alpha_to_one=_a , )
torch.manual_seed(0 )
UpperCamelCase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCamelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
UpperCamelCase__ = CLIPTextModel(_a )
UpperCamelCase__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCamelCase__ = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def A_ ( self : Optional[Any] , _a : Optional[int] , _a : Optional[Any]=0 ):
if str(_a ).startswith('''mps''' ):
UpperCamelCase__ = torch.manual_seed(_a )
else:
UpperCamelCase__ = torch.Generator(device=_a ).manual_seed(_a )
UpperCamelCase__ = 2
UpperCamelCase__ = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_a , device=torch.device(_a ) , )
UpperCamelCase__ = floats_tensor(control_image.shape , rng=random.Random(_a ) ).to(_a )
UpperCamelCase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCamelCase__ = Image.fromarray(np.uinta(_a ) ).convert('''RGB''' ).resize((64, 64) )
UpperCamelCase__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def A_ ( self : Union[str, Any] ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def A_ ( self : Any ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def A_ ( self : Any ):
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class __lowercase ( A, A, unittest.TestCase ):
'''simple docstring'''
_A : int = StableDiffusionControlNetImgaImgPipeline
_A : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
_A : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_A : Optional[Any] = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def A_ ( self : Tuple ):
torch.manual_seed(0 )
UpperCamelCase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0 )
        def init_weights(m):
            # initialize the controlnet down blocks with non-zero weights so the two
            # controlnets below produce different outputs
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)
UpperCamelCase__ = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(_a )
torch.manual_seed(0 )
UpperCamelCase__ = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(_a )
torch.manual_seed(0 )
UpperCamelCase__ = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_a , set_alpha_to_one=_a , )
torch.manual_seed(0 )
UpperCamelCase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCamelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
UpperCamelCase__ = CLIPTextModel(_a )
UpperCamelCase__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCamelCase__ = MultiControlNetModel([controlneta, controlneta] )
UpperCamelCase__ = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def A_ ( self : Tuple , _a : Dict , _a : Optional[int]=0 ):
if str(_a ).startswith('''mps''' ):
UpperCamelCase__ = torch.manual_seed(_a )
else:
UpperCamelCase__ = torch.Generator(device=_a ).manual_seed(_a )
UpperCamelCase__ = 2
UpperCamelCase__ = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_a , device=torch.device(_a ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_a , device=torch.device(_a ) , ),
]
UpperCamelCase__ = floats_tensor(control_image[0].shape , rng=random.Random(_a ) ).to(_a )
UpperCamelCase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCamelCase__ = Image.fromarray(np.uinta(_a ) ).convert('''RGB''' ).resize((64, 64) )
UpperCamelCase__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def A_ ( self : str ):
UpperCamelCase__ = self.get_dummy_components()
UpperCamelCase__ = self.pipeline_class(**_a )
pipe.to(_a )
UpperCamelCase__ = 10.0
UpperCamelCase__ = 4
UpperCamelCase__ = self.get_dummy_inputs(_a )
UpperCamelCase__ = steps
UpperCamelCase__ = scale
UpperCamelCase__ = pipe(**_a )[0]
UpperCamelCase__ = self.get_dummy_inputs(_a )
UpperCamelCase__ = steps
UpperCamelCase__ = scale
UpperCamelCase__ = pipe(**_a , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
UpperCamelCase__ = self.get_dummy_inputs(_a )
UpperCamelCase__ = steps
UpperCamelCase__ = scale
UpperCamelCase__ = pipe(**_a , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
UpperCamelCase__ = self.get_dummy_inputs(_a )
UpperCamelCase__ = steps
UpperCamelCase__ = scale
UpperCamelCase__ = pipe(**_a , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
def A_ ( self : int ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def A_ ( self : int ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def A_ ( self : int ):
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def A_ ( self : Dict ):
UpperCamelCase__ = self.get_dummy_components()
UpperCamelCase__ = self.pipeline_class(**_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(_a )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self : Optional[int] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : Dict ):
UpperCamelCase__ = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' )
UpperCamelCase__ = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , safety_checker=_a , controlnet=_a )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_a )
UpperCamelCase__ = torch.Generator(device='''cpu''' ).manual_seed(0 )
UpperCamelCase__ = '''evil space-punk bird'''
UpperCamelCase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((512, 512) )
UpperCamelCase__ = load_image(
'''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((512, 512) )
UpperCamelCase__ = pipe(
_a , _a , control_image=_a , generator=_a , output_type='''np''' , num_inference_steps=50 , strength=0.6 , )
UpperCamelCase__ = output.images[0]
assert image.shape == (512, 512, 3)
UpperCamelCase__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' )
assert np.abs(expected_image - image ).max() < 9E-2
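# The slow test above downloads real checkpoints and requires a CUDA GPU; the
# fast tests run on dummy components. A pytest invocation sketch (the path is
# illustrative of the diffusers repository layout):
#
#   RUN_SLOW=1 pytest tests/pipelines/controlnet/test_controlnet_img2img.py -q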
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
UpperCAmelCase__ = StableDiffusionInpaintPipeline
UpperCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
UpperCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCAmelCase__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
UpperCAmelCase__ = frozenset([] )
def lowerCamelCase_ ( self :Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase_ : Optional[int] =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_lowerCamelCase , )
UpperCamelCase_ : str =PNDMScheduler(skip_prk_steps=_lowerCamelCase )
torch.manual_seed(0 )
UpperCamelCase_ : Optional[int] =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCamelCase_ : List[Any] =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='gelu' , projection_dim=512 , )
UpperCamelCase_ : Union[str, Any] =CLIPTextModel(_lowerCamelCase )
UpperCamelCase_ : Optional[int] =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
UpperCamelCase_ : Tuple ={
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def lowerCamelCase_ ( self :Optional[Any] , _lowerCamelCase :Union[str, Any] , _lowerCamelCase :Optional[Any]=0 ):
'''simple docstring'''
UpperCamelCase_ : List[str] =floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
UpperCamelCase_ : Any =image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCamelCase_ : List[Any] =Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('RGB' ).resize((64, 64) )
UpperCamelCase_ : Dict =Image.fromarray(np.uinta(image + 4 ) ).convert('RGB' ).resize((64, 64) )
if str(_lowerCamelCase ).startswith('mps' ):
UpperCamelCase_ : Tuple =torch.manual_seed(_lowerCamelCase )
else:
UpperCamelCase_ : List[str] =torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
UpperCamelCase_ : Optional[Any] ={
"prompt": "A painting of a squirrel eating a burger",
"image": init_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def lowerCamelCase_ ( self :Dict ):
'''simple docstring'''
UpperCamelCase_ : List[str] ="cpu" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase_ : Optional[int] =self.get_dummy_components()
UpperCamelCase_ : Tuple =StableDiffusionInpaintPipeline(**_lowerCamelCase )
UpperCamelCase_ : Any =sd_pipe.to(_lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCamelCase )
UpperCamelCase_ : List[Any] =self.get_dummy_inputs(_lowerCamelCase )
UpperCamelCase_ : List[Any] =sd_pipe(**_lowerCamelCase ).images
UpperCamelCase_ : Dict =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase_ : List[Any] =np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self :str ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
def lowerCamelCase_ ( self :Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self :Optional[Any] ):
'''simple docstring'''
UpperCamelCase_ : List[str] =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
UpperCamelCase_ : List[Any] =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
UpperCamelCase_ : Union[str, Any] =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench.npy' )
UpperCamelCase_ : Dict ="stabilityai/stable-diffusion-2-inpainting"
UpperCamelCase_ : List[str] =StableDiffusionInpaintPipeline.from_pretrained(_lowerCamelCase , safety_checker=_lowerCamelCase )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
pipe.enable_attention_slicing()
UpperCamelCase_ : List[Any] ="Face of a yellow cat, high resolution, sitting on a park bench"
UpperCamelCase_ : List[Any] =torch.manual_seed(0 )
UpperCamelCase_ : Tuple =pipe(
prompt=_lowerCamelCase , image=_lowerCamelCase , mask_image=_lowerCamelCase , generator=_lowerCamelCase , output_type='np' , )
UpperCamelCase_ : Union[str, Any] =output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def lowerCamelCase_ ( self :Optional[Any] ):
'''simple docstring'''
UpperCamelCase_ : List[Any] =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
UpperCamelCase_ : List[str] =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
UpperCamelCase_ : Optional[int] =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench_fp16.npy' )
UpperCamelCase_ : Dict ="stabilityai/stable-diffusion-2-inpainting"
UpperCamelCase_ : Dict =StableDiffusionInpaintPipeline.from_pretrained(
_lowerCamelCase , torch_dtype=torch.floataa , safety_checker=_lowerCamelCase , )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
pipe.enable_attention_slicing()
UpperCamelCase_ : str ="Face of a yellow cat, high resolution, sitting on a park bench"
UpperCamelCase_ : str =torch.manual_seed(0 )
UpperCamelCase_ : Dict =pipe(
prompt=_lowerCamelCase , image=_lowerCamelCase , mask_image=_lowerCamelCase , generator=_lowerCamelCase , output_type='np' , )
UpperCamelCase_ : Tuple =output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def lowerCamelCase_ ( self :Dict ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCamelCase_ : List[Any] =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
UpperCamelCase_ : Any =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
UpperCamelCase_ : Any ="stabilityai/stable-diffusion-2-inpainting"
UpperCamelCase_ : List[str] =PNDMScheduler.from_pretrained(_lowerCamelCase , subfolder='scheduler' )
UpperCamelCase_ : Tuple =StableDiffusionInpaintPipeline.from_pretrained(
_lowerCamelCase , safety_checker=_lowerCamelCase , scheduler=_lowerCamelCase , torch_dtype=torch.floataa , )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCamelCase_ : Optional[int] ="Face of a yellow cat, high resolution, sitting on a park bench"
UpperCamelCase_ : Dict =torch.manual_seed(0 )
UpperCamelCase_ : int =pipe(
prompt=_lowerCamelCase , image=_lowerCamelCase , mask_image=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=2 , output_type='np' , )
UpperCamelCase_ : Union[str, Any] =torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
def remove_digit(num: int) -> int:
    """Return the largest value obtainable by deleting exactly one digit of `num`."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions
        )


if __name__ == "__main__":
    __import__("doctest").testmod()
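# Worked example: for 1453 the one-digit deletions are 453, 153, 143 and 145,
# so the function above returns 453.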
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """Implements a batched, differentiable, standard pinhole camera."""
    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]
    def __post_init__(self) -> None:
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
    def resolution(self) -> torch.Tensor:
        return torch.from_numpy(np.array([self.width, self.height] , dtype=np.float32 ) )
    def fov(self) -> torch.Tensor:
        return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.float32 ) )
    def get_image_coords(self) -> torch.Tensor:
        # (width * height, 2) integer pixel coordinates in row-major order
        pixel_indices = torch.arange(self.height * self.width )
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices , self.width , rounding_mode='trunc' ),
            ] , axis=1 , )
        return coords
    @property
    def camera_rays(self) -> torch.Tensor:
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape ) )
        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
        rays = self.get_camera_rays(coords )
        rays = rays.view(batch_size , inner_batch_size * self.height * self.width , 2 , 3 )
        return rays
    def get_camera_rays(self , coords: torch.Tensor ) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        flat = coords.view(batch_size , -1 , 2 )
        res = self.resolution()
        fov = self.fov()
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2 )
        fracs = fracs.view(batch_size , -1 , 2 )
        directions = (
            self.z.view(batch_size , 1 , 3 )
            + self.x.view(batch_size , 1 , 3 ) * fracs[:, :, :1]
            + self.y.view(batch_size , 1 , 3 ) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1 , keepdim=True )
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
                directions,
            ] , dim=2 , )
        return rays.view(batch_size , *shape , 2 , 3 )
    def resize_image(self , width: int , height: int ) -> "DifferentiableProjectiveCamera":
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin , x=self.x , y=self.y , z=self.z , width=width , height=height , x_fov=self.x_fov , y_fov=self.y_fov , shape=self.shape , )  # shape must be forwarded: the dataclass field has no default
def create_pan_cameras(size: int ) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0 , 2 * np.pi , num=20 ):
        z = np.array([np.sin(theta ), np.cos(theta ), -0.5] )
        z /= np.sqrt(np.sum(z**2 ) )
        origin = -z * 4
        x = np.array([np.cos(theta ), -np.sin(theta ), 0.0] )
        y = np.cross(z , x )
        origins.append(origin )
        xs.append(x )
        ys.append(y )
        zs.append(z )
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins , axis=0 ) ).float() , x=torch.from_numpy(np.stack(xs , axis=0 ) ).float() , y=torch.from_numpy(np.stack(ys , axis=0 ) ).float() , z=torch.from_numpy(np.stack(zs , axis=0 ) ).float() , width=size , height=size , x_fov=0.7 , y_fov=0.7 , shape=(1, len(xs )) , )
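# Usage sketch for the utilities above: build the 20-pose pan rig and read out
# per-pixel rays. Shapes follow from the class definition above.
def _camera_rays_sketch():
    cameras = create_pan_cameras(64 )   # 20 poses on a circle around the origin
    rays = cameras.camera_rays          # [1, 20 * 64 * 64, 2, 3]
    ray_origins = rays[..., 0, :]       # [1, 81920, 3]
    ray_directions = rays[..., 1, :]    # unit direction vectors, same shape
    return ray_origins, ray_directions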
| 719
|
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''')
@require_sentencepiece
@require_tokenizers
class __a ( __UpperCamelCase , unittest.TestCase ):
__lowercase : int = PegasusTokenizer
__lowercase : int = PegasusTokenizerFast
__lowercase : Dict = True
__lowercase : Union[str, Any] = True
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
'''simple docstring'''
return PegasusTokenizer.from_pretrained('google/pegasus-large' )
def SCREAMING_SNAKE_CASE__ ( self , **lowerCAmelCase__ ) -> PegasusTokenizer:
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
return ("This is a test", "This is a test")
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase__: Tuple = '</s>'
lowercase__: Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
'''simple docstring'''
lowercase__: Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '</s>' )
self.assertEqual(vocab_keys[-1] , 'v' )
self.assertEqual(len(lowerCAmelCase__ ) , 1_103 )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_103 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
lowercase__: Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowercase__: List[str] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowercase__: Any = (
'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
' </s> <pad> <pad> <pad>'
)
lowercase__: Any = rust_tokenizer([raw_input_str] , return_tensors=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ).input_ids[0]
lowercase__: str = py_tokenizer([raw_input_str] , return_tensors=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ).input_ids[0]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
lowercase__: Optional[int] = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowercase__: Optional[int] = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
lowercase__: Any = [2, 413, 615, 114, 3, 1_971, 113, 1_679, 10_710, 107, 1]
lowercase__: int = tokenizer([raw_input_str] , return_tensors=lowerCAmelCase__ ).input_ids[0]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
lowercase__: Union[str, Any] = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96_103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_024
lowercase__: Tuple = 'To ensure a smooth flow of bank resolutions.'
lowercase__: Tuple = [413, 615, 114, 2_291, 1_971, 113, 1_679, 10_710, 107, 1]
lowercase__: Tuple = tokenizer([raw_input_str] , return_tensors=lowerCAmelCase__ ).input_ids[0]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
lowercase__: Optional[Any] = ['This is going to be way too long.' * 150, 'short example']
lowercase__: Optional[int] = ['not super long but more than 5 tokens', 'tiny']
lowercase__: str = self._large_tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors='pt' )
lowercase__: Tuple = self._large_tokenizer(
text_target=lowerCAmelCase__ , max_length=5 , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 1_024)
assert batch.attention_mask.shape == (2, 1_024)
assert targets["input_ids"].shape == (2, 5)
assert len(lowerCAmelCase__ ) == 2 # input_ids, attention_mask.
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
# fmt: off
lowercase__: Union[str, Any] = {'input_ids': [[38_979, 143, 18_485, 606, 130, 26_669, 87_686, 121, 54_189, 1_129, 111, 26_669, 87_686, 121, 9_114, 14_787, 121, 13_249, 158, 592, 956, 121, 14_621, 31_576, 143, 62_613, 108, 9_688, 930, 43_430, 11_562, 62_613, 304, 108, 11_443, 897, 108, 9_314, 17_415, 63_399, 108, 11_443, 7_614, 18_316, 118, 4_284, 7_148, 12_430, 143, 1_400, 25_703, 158, 111, 4_284, 7_148, 11_772, 143, 21_297, 1_064, 158, 122, 204, 3_506, 1_754, 1_133, 14_787, 1_581, 115, 33_224, 4_482, 111, 1_355, 110, 29_173, 317, 50_833, 108, 20_147, 94_665, 111, 77_198, 107, 1], [110, 62_613, 117, 638, 112, 1_133, 121, 20_098, 1_355, 79_050, 13_872, 135, 1_596, 53_541, 1_352, 141, 13_039, 5_542, 124, 302, 518, 111, 268, 2_956, 115, 149, 4_427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1_235, 2_799, 18_289, 17_780, 204, 109, 9_474, 1_296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class __a ( __UpperCamelCase , unittest.TestCase ):
__lowercase : Union[str, Any] = PegasusTokenizer
__lowercase : Union[str, Any] = PegasusTokenizerFast
__lowercase : int = True
__lowercase : Union[str, Any] = True
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB , offset=0 , mask_token_sent=None , mask_token='[MASK]' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )
def SCREAMING_SNAKE_CASE__ ( self , **lowerCAmelCase__ ) -> PegasusTokenizer:
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
return ("This is a test", "This is a test")
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
lowercase__: int = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowercase__: List[str] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowercase__: List[Any] = (
'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
' <pad> <pad> <pad>'
)
lowercase__: Tuple = rust_tokenizer([raw_input_str] , return_tensors=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ).input_ids[0]
lowercase__: str = py_tokenizer([raw_input_str] , return_tensors=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ).input_ids[0]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
@require_torch
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
lowercase__: Optional[Any] = ['This is going to be way too long.' * 1_000, 'short example']
lowercase__: Optional[Any] = ['not super long but more than 5 tokens', 'tiny']
lowercase__: Optional[int] = self._large_tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors='pt' )
lowercase__: Tuple = self._large_tokenizer(
text_target=lowerCAmelCase__ , max_length=5 , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 4_096)
assert batch.attention_mask.shape == (2, 4_096)
assert targets["input_ids"].shape == (2, 5)
assert len(lowerCAmelCase__ ) == 2 # input_ids, attention_mask.
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase__: Union[str, Any] = (
'This is an example string that is used to test the original TF implementation against the HF'
' implementation'
)
lowercase__: int = self._large_tokenizer(lowerCAmelCase__ ).input_ids
self.assertListEqual(
lowerCAmelCase__ , [182, 117, 142, 587, 4_211, 120, 117, 263, 112, 804, 109, 856, 25_016, 3_137, 464, 109, 26_955, 3_137, 1] , )
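# Illustrative sketch of the id layout the tests above assert (requires Hub
# access): Pegasus keeps <pad>=0 and </s>=1 and offsets the SentencePiece ids.
def _pegasus_sketch():
    tok = PegasusTokenizer.from_pretrained('google/pegasus-large' )
    ids = tok('To ensure a smooth flow of bank resolutions.' ).input_ids
    assert ids[-1] == tok.eos_token_id == 1
    assert tok.pad_token_id == 0
    return ids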
| 335
| 0
|
def alternative_string_arrange(first_str: str , second_str: str ) -> str:
    first_str_length: int = len(first_str )
    second_str_length: int = len(second_str )
    abs_length: int = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list: list = []
    for char_count in range(abs_length ):
        if char_count < first_str_length:
            output_list.append(first_str[char_count] )
        if char_count < second_str_length:
            output_list.append(second_str[char_count] )
    return "".join(output_list )
if __name__ == "__main__":
    print(alternative_string_arrange('AB', 'XYZ'), end=' ')
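# Worked checks for the interleaving above (the leftover tail of the longer
# string is appended as-is):
assert alternative_string_arrange('AB' , 'XYZ' ) == 'AXBYZ'
assert alternative_string_arrange('ABCD' , 'XY' ) == 'AXBYCD'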
| 485
|
import tensorflow as tf
from ...tf_utils import shape_list
class A_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , _A , _A , _A , _A , _A=1 , _A=False , **_A) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**_A)
_UpperCAmelCase : Dict = vocab_size
_UpperCAmelCase : Any = d_embed
_UpperCAmelCase : List[Any] = d_proj
_UpperCAmelCase : List[Any] = cutoffs + [vocab_size]
_UpperCAmelCase : str = [0] + self.cutoffs
_UpperCAmelCase : Union[str, Any] = div_val
_UpperCAmelCase : Any = self.cutoffs[0]
_UpperCAmelCase : Optional[Any] = len(self.cutoffs) - 1
_UpperCAmelCase : Tuple = self.shortlist_size + self.n_clusters
_UpperCAmelCase : List[Any] = keep_order
_UpperCAmelCase : List[str] = []
_UpperCAmelCase : List[Any] = []
def snake_case__ ( self , _A) -> List[str]:
"""simple docstring"""
if self.n_clusters > 0:
_UpperCAmelCase : Union[str, Any] = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer='''zeros''' , trainable=_A , name='''cluster_weight''')
_UpperCAmelCase : Tuple = self.add_weight(
shape=(self.n_clusters,) , initializer='''zeros''' , trainable=_A , name='''cluster_bias''')
if self.div_val == 1:
for i in range(len(self.cutoffs)):
if self.d_proj != self.d_embed:
_UpperCAmelCase : Optional[int] = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer='''zeros''' , trainable=_A , name=f'''out_projs_._{i}''' , )
self.out_projs.append(_A)
else:
self.out_projs.append(_A)
_UpperCAmelCase : str = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer='''zeros''' , trainable=_A , name=f'''out_layers_._{i}_._weight''' , )
_UpperCAmelCase : Optional[Any] = self.add_weight(
shape=(self.vocab_size,) , initializer='''zeros''' , trainable=_A , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias))
else:
for i in range(len(self.cutoffs)):
_UpperCAmelCase , _UpperCAmelCase : str = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_UpperCAmelCase : Optional[Any] = self.d_embed // (self.div_val**i)
_UpperCAmelCase : Optional[int] = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer='''zeros''' , trainable=_A , name=f'''out_projs_._{i}''')
self.out_projs.append(_A)
_UpperCAmelCase : Dict = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer='''zeros''' , trainable=_A , name=f'''out_layers_._{i}_._weight''' , )
_UpperCAmelCase : Any = self.add_weight(
shape=(r_idx - l_idx,) , initializer='''zeros''' , trainable=_A , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias))
super().build(_A)
@staticmethod
def snake_case__ ( _A , _A , _A , _A=None) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : Tuple = x
if proj is not None:
_UpperCAmelCase : Optional[int] = tf.einsum('''ibd,ed->ibe''' , _A , _A)
return tf.einsum('''ibd,nd->ibn''' , _A , _A) + b
@staticmethod
def snake_case__ ( _A , _A) -> str:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = shape_list(_A)
_UpperCAmelCase : int = tf.range(lp_size[0] , dtype=target.dtype)
_UpperCAmelCase : Optional[Any] = tf.stack([r, target] , 1)
return tf.gather_nd(_A , _A)
def snake_case__ ( self , _A , _A , _A=True , _A=False) -> int:
"""simple docstring"""
_UpperCAmelCase : Tuple = 0
if self.n_clusters == 0:
_UpperCAmelCase : int = self._logit(_A , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0])
if target is not None:
_UpperCAmelCase : Any = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=_A , logits=_A)
_UpperCAmelCase : Union[str, Any] = tf.nn.log_softmax(_A , axis=-1)
else:
_UpperCAmelCase : Union[str, Any] = shape_list(_A)
_UpperCAmelCase : str = []
_UpperCAmelCase : Optional[Any] = tf.zeros(hidden_sizes[:2])
for i in range(len(self.cutoffs)):
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
_UpperCAmelCase : Any = (target >= l_idx) & (target < r_idx)
_UpperCAmelCase : str = tf.where(_A)
_UpperCAmelCase : Union[str, Any] = tf.boolean_mask(_A , _A) - l_idx
if self.div_val == 1:
_UpperCAmelCase : str = self.out_layers[0][0][l_idx:r_idx]
_UpperCAmelCase : Any = self.out_layers[0][1][l_idx:r_idx]
else:
_UpperCAmelCase : int = self.out_layers[i][0]
_UpperCAmelCase : int = self.out_layers[i][1]
if i == 0:
_UpperCAmelCase : Optional[Any] = tf.concat([cur_W, self.cluster_weight] , 0)
_UpperCAmelCase : Optional[int] = tf.concat([cur_b, self.cluster_bias] , 0)
_UpperCAmelCase : int = self._logit(_A , _A , _A , self.out_projs[0])
_UpperCAmelCase : int = tf.nn.log_softmax(_A)
out.append(head_logprob[..., : self.cutoffs[0]])
if target is not None:
_UpperCAmelCase : List[str] = tf.boolean_mask(_A , _A)
_UpperCAmelCase : Optional[Any] = self._gather_logprob(_A , _A)
else:
_UpperCAmelCase : List[str] = self._logit(_A , _A , _A , self.out_projs[i])
_UpperCAmelCase : Union[str, Any] = tf.nn.log_softmax(_A)
_UpperCAmelCase : Optional[Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster
_UpperCAmelCase : Optional[Any] = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(_A)
if target is not None:
_UpperCAmelCase : Optional[Any] = tf.boolean_mask(_A , _A)
_UpperCAmelCase : str = tf.boolean_mask(_A , _A)
_UpperCAmelCase : Optional[Any] = self._gather_logprob(_A , _A)
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(_A , -cur_logprob , shape_list(_A))
_UpperCAmelCase : Optional[Any] = tf.concat(_A , axis=-1)
if target is not None:
if return_mean:
_UpperCAmelCase : Optional[int] = tf.reduce_mean(_A)
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(_A)
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference.
self.add_metric(_A , name=self.name , aggregation='''mean''' if return_mean else '''''')
return out
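# Self-contained sketch of the adaptive-softmax idea the layer above implements.
# Sizes and variable names are illustrative, not the layer's real configuration:
# frequent "head" tokens get full-resolution logits, while rare "tail" tokens are
# scored as log P(cluster) + log P(token | cluster), shrinking the output layer.
def _adaptive_softmax_sketch():
    vocab_size, d_model, cutoff = 1000, 64, 200
    hidden = tf.random.normal([2, 5, d_model])                       # [batch, time, d]
    head_w = tf.random.normal([cutoff + 1, d_model])                 # +1 slot for the tail cluster
    tail_w = tf.random.normal([vocab_size - cutoff, d_model])
    head_logprob = tf.nn.log_softmax(tf.einsum('btd,nd->btn' , hidden , head_w) , axis=-1)
    tail_logprob = tf.nn.log_softmax(tf.einsum('btd,nd->btn' , hidden , tail_w) , axis=-1)
    tail_full = head_logprob[..., -1:] + tail_logprob                # chain rule in log space
    return tf.concat([head_logprob[..., :-1], tail_full] , axis=-1)  # [2, 5, vocab_size]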
| 485
| 1
|
"""simple docstring"""
def move_tower(height: int , from_pole: str , to_pole: str , with_pole: str ) -> None:
    if height >= 1:
        move_tower(height - 1 , from_pole , with_pole , to_pole )
        move_disk(from_pole , to_pole )
        move_tower(height - 1 , with_pole , to_pole , from_pole )
def move_disk(fp: str , tp: str ) -> None:
    print('moving disk from' , fp , 'to' , tp )
def main() -> None:
    height = int(input('Height of hanoi: ' ).strip() )
    move_tower(height , 'A' , 'B' , 'C' )
if __name__ == "__main__":
main()
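# The recursion above performs 2**height - 1 moves; a quick closed-form check:
def count_moves(height: int ) -> int:
    return 2**height - 1
assert count_moves(2 ) == 3    # A->C, A->B, C->B for move_tower(2, 'A', 'B', 'C')
assert count_moves(10 ) == 1023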
| 327
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__UpperCamelCase = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
__UpperCamelCase = {'''facebook/blenderbot-3B''': 128}
class lowerCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : str = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : int = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE_ : Optional[int] = BlenderbotTokenizer
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__="replace" , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__=False , lowerCAmelCase__=True , **lowerCAmelCase__ , ) -> str:
super().__init__(
lowerCAmelCase__ , lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , errors=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ , **lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , lowerCAmelCase__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE = getattr(lowerCAmelCase__ , pre_tok_state.pop('type' ) )
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = pre_tok_class(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = 'post_processor'
SCREAMING_SNAKE_CASE = getattr(self.backend_tokenizer , lowerCAmelCase__ , lowerCAmelCase__ )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE = tuple(state['sep'] )
if "cls" in state:
SCREAMING_SNAKE_CASE = tuple(state['cls'] )
SCREAMING_SNAKE_CASE = False
if state.get('add_prefix_space' , lowerCAmelCase__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = True
if state.get('trim_offsets' , lowerCAmelCase__ ) != trim_offsets:
SCREAMING_SNAKE_CASE = trim_offsets
SCREAMING_SNAKE_CASE = True
if changes_to_apply:
SCREAMING_SNAKE_CASE = getattr(lowerCAmelCase__ , state.pop('type' ) )
SCREAMING_SNAKE_CASE = component_class(**lowerCAmelCase__ )
setattr(self.backend_tokenizer , lowerCAmelCase__ , lowerCAmelCase__ )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __A ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def __A ( self , lowerCAmelCase__ ) -> List[str]:
SCREAMING_SNAKE_CASE = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else value
SCREAMING_SNAKE_CASE = value
def __A ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> BatchEncoding:
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , lowerCAmelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowerCAmelCase__ , **lowerCAmelCase__ )
def __A ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> BatchEncoding:
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , lowerCAmelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowerCAmelCase__ , **lowerCAmelCase__ )
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Dict:
return token_ids_a + [self.eos_token_id]
def __A ( self , lowerCAmelCase__ ) -> List[int]:
SCREAMING_SNAKE_CASE = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(' ' + text )
else:
# Generated responses should contain them already.
inputs.append(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = ' '.join(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = self.encode(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > self.model_max_length:
SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :]
logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids
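# Usage sketch for the fast tokenizer above (requires Hub access): a single
# sequence simply gets </s> appended, per build_inputs_with_special_tokens.
def _blenderbot_sketch():
    from transformers import BlenderbotTokenizerFast
    tok = BlenderbotTokenizerFast.from_pretrained('facebook/blenderbot-3B' )
    ids = tok(' Hello world' ).input_ids
    assert ids[-1] == tok.eos_token_id
    return ids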
| 327
| 1
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int]=2 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Any=10 , UpperCAmelCase_ : List[Any]=3 , UpperCAmelCase_ : List[str]=32 * 4 , UpperCAmelCase_ : Optional[int]=32 * 6 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : Any=32 , ):
SCREAMING_SNAKE_CASE : Union[str, Any] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : Tuple = is_training
SCREAMING_SNAKE_CASE : Union[str, Any] = use_auxiliary_loss
SCREAMING_SNAKE_CASE : List[Any] = num_queries
SCREAMING_SNAKE_CASE : int = num_channels
SCREAMING_SNAKE_CASE : Tuple = min_size
SCREAMING_SNAKE_CASE : List[Any] = max_size
SCREAMING_SNAKE_CASE : Tuple = num_labels
SCREAMING_SNAKE_CASE : List[Any] = mask_feature_size
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : int = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Union[str, Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__SCREAMING_SNAKE_CASE ) > 0.5
).float()
SCREAMING_SNAKE_CASE : Optional[Any] = (torch.rand((self.batch_size, self.num_labels) , device=__SCREAMING_SNAKE_CASE ) > 0.5).long()
SCREAMING_SNAKE_CASE : List[str] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _A ( self : Dict ):
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE : str = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def _A ( self : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Dict = output.encoder_hidden_states
SCREAMING_SNAKE_CASE : str = output.pixel_decoder_hidden_states
SCREAMING_SNAKE_CASE : str = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__SCREAMING_SNAKE_CASE ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__SCREAMING_SNAKE_CASE ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__SCREAMING_SNAKE_CASE ) , config.decoder_config.decoder_layers )
def _A ( self : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int=False ):
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = MaskFormerModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(pixel_values=__SCREAMING_SNAKE_CASE , pixel_mask=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : List[str] = model(__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _A ( self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
SCREAMING_SNAKE_CASE : List[str] = MaskFormerForInstanceSegmentation(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
def comm_check_on_output(UpperCAmelCase_ : Tuple ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = model(pixel_values=__SCREAMING_SNAKE_CASE , pixel_mask=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : int = model(__SCREAMING_SNAKE_CASE )
comm_check_on_output(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : List[Any] = model(
pixel_values=__SCREAMING_SNAKE_CASE , pixel_mask=__SCREAMING_SNAKE_CASE , mask_labels=__SCREAMING_SNAKE_CASE , class_labels=__SCREAMING_SNAKE_CASE )
comm_check_on_output(__SCREAMING_SNAKE_CASE )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ : Union[str, Any] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
UpperCamelCase_ : Union[str, Any] = (
{'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
UpperCamelCase_ : Any = False
UpperCamelCase_ : Any = False
UpperCamelCase_ : Optional[Any] = False
UpperCamelCase_ : Optional[Any] = False
def _A ( self : Any ):
SCREAMING_SNAKE_CASE : Dict = MaskFormerModelTester(self )
SCREAMING_SNAKE_CASE : Union[str, Any] = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE )
def _A ( self : List[str] ):
self.config_tester.run_common_tests()
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE )
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__SCREAMING_SNAKE_CASE )
@unittest.skip(reason="MaskFormer does not use inputs_embeds" )
def _A ( self : Union[str, Any] ):
pass
@unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
def _A ( self : Dict ):
pass
@unittest.skip(reason="MaskFormer is not a generative model" )
def _A ( self : int ):
pass
@unittest.skip(reason="MaskFormer does not use token embeddings" )
def _A ( self : Optional[Any] ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason="MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`" )
def _A ( self : Optional[int] ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _A ( self : List[str] ):
pass
def _A ( self : List[Any] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[Any] = model_class(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Union[str, Any] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
@slow
def _A ( self : str ):
for model_name in ["facebook/maskformer-swin-small-coco"]:
SCREAMING_SNAKE_CASE : Dict = MaskFormerModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def _A ( self : str ):
SCREAMING_SNAKE_CASE : Optional[Any] = (self.model_tester.min_size,) * 2
SCREAMING_SNAKE_CASE : Dict = {
"pixel_values": torch.randn((2, 3, *size) , device=__SCREAMING_SNAKE_CASE ),
"mask_labels": torch.randn((2, 10, *size) , device=__SCREAMING_SNAKE_CASE ),
"class_labels": torch.zeros(2 , 10 , device=__SCREAMING_SNAKE_CASE ).long(),
}
SCREAMING_SNAKE_CASE : Optional[int] = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Any = model(**__SCREAMING_SNAKE_CASE )
self.assertTrue(outputs.loss is not None )
def _A ( self : Any ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE )
def _A ( self : str ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : List[Any] = model(**__SCREAMING_SNAKE_CASE , output_attentions=__SCREAMING_SNAKE_CASE )
self.assertTrue(outputs.attentions is not None )
def _A ( self : Dict ):
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
SCREAMING_SNAKE_CASE : Tuple = self.all_model_classes[1]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE : Any = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.train()
SCREAMING_SNAKE_CASE : Tuple = model(__SCREAMING_SNAKE_CASE , mask_labels=__SCREAMING_SNAKE_CASE , class_labels=__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE : Tuple = self.all_model_classes[1]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE : str = True
SCREAMING_SNAKE_CASE : List[Any] = True
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.train()
SCREAMING_SNAKE_CASE : int = model(__SCREAMING_SNAKE_CASE , mask_labels=__SCREAMING_SNAKE_CASE , class_labels=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Any = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE : List[Any] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
SCREAMING_SNAKE_CASE : Any = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE : Tuple = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
snake_case = 1e-4
def prepare_img():
    """Loads the COCO test fixture used by the integration tests below."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_vision
@slow
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _A ( self : List[str] ):
return (
MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
if is_vision_available()
else None
)
def _A ( self : str ):
SCREAMING_SNAKE_CASE : List[str] = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Optional[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE : Tuple = prepare_img()
SCREAMING_SNAKE_CASE : Dict = image_processor(__SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Union[str, Any] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__SCREAMING_SNAKE_CASE , (1, 3, 800, 1088) )
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(**__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(
[[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(
[[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) )
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : int = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(__SCREAMING_SNAKE_CASE )
.eval()
)
SCREAMING_SNAKE_CASE : Optional[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE : List[str] = prepare_img()
SCREAMING_SNAKE_CASE : Any = image_processor(__SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : List[Any] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__SCREAMING_SNAKE_CASE , (1, 3, 800, 1088) )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Dict = model(**__SCREAMING_SNAKE_CASE )
# masks_queries_logits
SCREAMING_SNAKE_CASE : str = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
SCREAMING_SNAKE_CASE : Optional[int] = [
[-1.3_737_124, -1.7_724_937, -1.9_364_233],
[-1.5_977_281, -1.9_867_939, -2.1_523_695],
[-1.5_795_398, -1.9_269_832, -2.093_942],
]
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) )
# class_queries_logits
SCREAMING_SNAKE_CASE : Union[str, Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
SCREAMING_SNAKE_CASE : str = torch.tensor(
[
[1.6_512E00, -5.2_572E00, -3.3_519E00],
[3.6_169E-02, -5.9_025E00, -2.9_313E00],
[1.0_766E-04, -7.7_630E00, -5.1_263E00],
] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) )
def _A ( self : List[Any] ):
SCREAMING_SNAKE_CASE : Tuple = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" )
.to(__SCREAMING_SNAKE_CASE )
.eval()
)
SCREAMING_SNAKE_CASE : Optional[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE : str = prepare_img()
SCREAMING_SNAKE_CASE : int = image_processor(__SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : List[Any] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__SCREAMING_SNAKE_CASE , (1, 3, 800, 1088) )
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(**__SCREAMING_SNAKE_CASE )
# masks_queries_logits
SCREAMING_SNAKE_CASE : List[str] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
SCREAMING_SNAKE_CASE : str = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]]
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) )
# class_queries_logits
SCREAMING_SNAKE_CASE : Tuple = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
SCREAMING_SNAKE_CASE : Any = torch.tensor(
[[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) )
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : Union[str, Any] = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(__SCREAMING_SNAKE_CASE )
.eval()
)
SCREAMING_SNAKE_CASE : List[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE : str = image_processor(
            [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.float32 ), np.zeros((384, 384) ).astype(np.float32 )] , return_tensors="pt" , )
SCREAMING_SNAKE_CASE : int = inputs["pixel_values"].to(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Any = [el.to(__SCREAMING_SNAKE_CASE ) for el in inputs["mask_labels"]]
SCREAMING_SNAKE_CASE : int = [el.to(__SCREAMING_SNAKE_CASE ) for el in inputs["class_labels"]]
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = model(**__SCREAMING_SNAKE_CASE )
self.assertTrue(outputs.loss is not None )
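# Minimal inference sketch mirroring the integration tests above. Checkpoint and
# fixture path come from the tests; the post-processing call assumes a recent
# transformers release.
def _maskformer_sketch():
    processor = MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-small-coco' )
    model = MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' ).eval()
    image = prepare_img()
    inputs = processor(image , return_tensors='pt' )
    with torch.no_grad():
        outputs = model(**inputs )
    # class_queries_logits: [1, num_queries, num_labels + 1]; masks at 1/4 resolution
    return processor.post_process_semantic_segmentation(outputs , target_sizes=[image.size[::-1]] )[0]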
| 62
|
'''simple docstring'''
def partition(m : int )-> int:
    '''Count the integer partitions of m via dynamic programming.'''
    memo = [[0 for _ in range(m )] for _ in range(m + 1 )]
    for i in range(m + 1 ):
        memo[i][0] = 1
    for n in range(m + 1 ):
        for k in range(1 , m ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
if __name__ == "__main__":
    import sys
    if len(sys.argv) == 1:
        try:
            n = int(input('''Enter a number: ''').strip())
            print(partition(n))
        except ValueError:
            print('''Please enter a number.''')
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print('''Please pass a number.''')
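# Worked checks: the partitions of 4 are 4, 3+1, 2+2, 2+1+1, 1+1+1+1 (five of
# them), and p(5) = 7.
assert partition(4 ) == 5
assert partition(5 ) == 7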
| 24
| 0
|
UpperCamelCase__ : List[str] = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
UpperCamelCase__ : int = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
UpperCamelCase__ : Dict = {
0: "Sunday",
1: "Monday",
2: "Tuesday",
3: "Wednesday",
4: "Thursday",
5: "Friday",
6: "Saturday",
}
def get_week_day(year: int , month: int , day: int ) -> str:
    """Returns the week-day name for a given date, via the Doomsday algorithm."""
    assert len(str(year ) ) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
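# Worked checks against known calendar dates:
assert get_week_day(2020 , 10 , 24 ) == 'Saturday'
assert get_week_day(2017 , 10 , 24 ) == 'Tuesday'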
| 701
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig):
    model_type = 'biogpt'
    def __init__( self , vocab_size=42384 , hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1024 , initializer_range=0.02 , layer_norm_eps=1e-12 , scale_embedding=True , use_cache=True , layerdrop=0.0 , activation_dropout=0.0 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
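# Quick round-trip sketch with the config above (runs when this module is
# importable inside transformers, since the imports above are relative):
_cfg = BioGptConfig(hidden_size=512 , num_hidden_layers=6 )
assert _cfg.model_type == 'biogpt' and _cfg.hidden_size == 512
assert _cfg.pad_token_id == 1 and _cfg.bos_token_id == 0 and _cfg.eos_token_id == 2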
| 620
| 0
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''blenderbot-small'''
SCREAMING_SNAKE_CASE_ : int = ['''past_key_values''']
SCREAMING_SNAKE_CASE_ : Tuple = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : List[str] ,__A : List[Any]=5_0265 ,__A : str=512 ,__A : Optional[int]=8 ,__A : Any=2048 ,__A : Tuple=16 ,__A : str=8 ,__A : int=2048 ,__A : List[str]=16 ,__A : Optional[int]=0.0 ,__A : Any=0.0 ,__A : int=True ,__A : List[Any]=True ,__A : Tuple="gelu" ,__A : Any=512 ,__A : Dict=0.1 ,__A : Tuple=0.0 ,__A : int=0.0 ,__A : int=0.02 ,__A : Dict=1 ,__A : str=False ,__A : Dict=0 ,__A : Union[str, Any]=1 ,__A : Optional[int]=2 ,__A : List[str]=2 ,**__A : Tuple ,) -> Tuple:
_lowercase = vocab_size
_lowercase = max_position_embeddings
_lowercase = d_model
_lowercase = encoder_ffn_dim
_lowercase = encoder_layers
_lowercase = encoder_attention_heads
_lowercase = decoder_ffn_dim
_lowercase = decoder_layers
_lowercase = decoder_attention_heads
_lowercase = dropout
_lowercase = attention_dropout
_lowercase = activation_dropout
_lowercase = activation_function
_lowercase = init_std
_lowercase = encoder_layerdrop
_lowercase = decoder_layerdrop
_lowercase = use_cache
_lowercase = encoder_layers
_lowercase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=__A ,bos_token_id=__A ,eos_token_id=__A ,is_encoder_decoder=__A ,decoder_start_token_id=__A ,forced_eos_token_id=__A ,**__A ,)
class A_ ( UpperCAmelCase ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_lowercase = {0: 'batch'}
_lowercase = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
_lowercase = {0: 'batch', 1: 'decoder_sequence'}
_lowercase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(__A ,direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
_lowercase = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_lowercase , _lowercase = self.num_layers
for i in range(__A ):
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
else:
_lowercase = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def __UpperCAmelCase ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = super().outputs
else:
_lowercase = super(__A ,self ).outputs
if self.use_past:
_lowercase , _lowercase = self.num_layers
for i in range(__A ):
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def __UpperCAmelCase ( self : Optional[int] ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,__A ,__A ,__A ,__A )
# Generate decoder inputs
_lowercase = seq_length if not self.use_past else 1
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,__A ,__A ,__A ,__A )
_lowercase = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
_lowercase = dict(**__A ,**__A )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_lowercase , _lowercase = common_inputs['input_ids'].shape
_lowercase = common_inputs['decoder_input_ids'].shape[1]
_lowercase , _lowercase = self.num_attention_heads
_lowercase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowercase = decoder_seq_length + 3
_lowercase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_lowercase = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(__A ,__A )] ,dim=1 )
_lowercase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_lowercase , _lowercase = self.num_layers
_lowercase = min(__A ,__A )
_lowercase = max(__A ,__A ) - min_num_layers
_lowercase = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(__A ):
common_inputs["past_key_values"].append(
(
torch.zeros(__A ),
torch.zeros(__A ),
torch.zeros(__A ),
torch.zeros(__A ),
) )
# TODO: test this.
_lowercase = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(__A ,__A ):
common_inputs["past_key_values"].append((torch.zeros(__A ), torch.zeros(__A )) )
return common_inputs
def __UpperCAmelCase ( self : List[Any] ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,__A ,__A ,__A ,__A )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_lowercase , _lowercase = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_lowercase = seqlen + 2
_lowercase , _lowercase = self.num_layers
_lowercase , _lowercase = self.num_attention_heads
_lowercase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowercase = common_inputs['attention_mask'].dtype
_lowercase = torch.cat(
[common_inputs['attention_mask'], torch.ones(__A ,__A ,dtype=__A )] ,dim=1 )
_lowercase = [
(torch.zeros(__A ), torch.zeros(__A )) for _ in range(__A )
]
return common_inputs
def __UpperCAmelCase ( self : Any ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_lowercase = compute_effective_axis_dimension(
__A ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_lowercase = tokenizer.num_special_tokens_to_add(__A )
_lowercase = compute_effective_axis_dimension(
__A ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=__A )
# Generate dummy inputs according to compute batch and sequence
_lowercase = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
_lowercase = dict(tokenizer(__A ,return_tensors=__A ) )
return common_inputs
def __UpperCAmelCase ( self : Dict ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
elif self.task == "causal-lm":
_lowercase = self._generate_dummy_inputs_for_causal_lm(
__A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
else:
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
return common_inputs
def __UpperCAmelCase ( self : List[str] ,__A : Dict ,__A : Any ,__A : List[Any] ,__A : Tuple ) -> Union[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = super()._flatten_past_key_values_(__A ,__A ,__A ,__A )
else:
_lowercase = super(__A ,self )._flatten_past_key_values_(
__A ,__A ,__A ,__A )
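# A minimal, self-contained sketch of the past_key_values layout that the config
# above fills with zeros: one (key, value) pair per decoder layer, each shaped
# (batch, num_heads, past_seq_len, hidden_size // num_heads). All sizes below are
# illustrative assumptions, not values from any real model configuration.
import torch

batch, num_heads, past_len, hidden_size, num_layers = 2, 4, 5, 32, 3
head_dim = hidden_size // num_heads
past_key_values = [
    (
        torch.zeros(batch, num_heads, past_len, head_dim),
        torch.zeros(batch, num_heads, past_len, head_dim),
    )
    for _ in range(num_layers)
]
# The attention mask is extended with ones so the dummy past positions are attended
# to, mirroring the torch.cat([attention_mask, torch.ones(...)], dim=1) calls above.
attention_mask = torch.cat([torch.ones(batch, 7), torch.ones(batch, past_len)], dim=1)
assert past_key_values[0][0].shape == (batch, num_heads, past_len, head_dim)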
| 67
|
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class UpperCAmelCase__ :
def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=True , A__=True , A__=True , A__=99 , A__=32 , A__=2 , A__=4 , A__=37 , A__="gelu" , A__=0.1 , A__=0.1 , A__=512 , A__=16 , A__=2 , A__=0.02 , A__=False , A__=True , A__="None" , A__=3 , A__=4 , A__=None , ):
"""simple docstring"""
UpperCAmelCase_: str = parent
UpperCAmelCase_: Any = batch_size
UpperCAmelCase_: Union[str, Any] = seq_length
UpperCAmelCase_: Optional[int] = is_training
UpperCAmelCase_: List[Any] = use_input_mask
UpperCAmelCase_: List[str] = use_token_type_ids
UpperCAmelCase_: Any = use_labels
UpperCAmelCase_: Optional[Any] = vocab_size
UpperCAmelCase_: List[Any] = hidden_size
UpperCAmelCase_: int = num_hidden_layers
UpperCAmelCase_: Tuple = num_attention_heads
UpperCAmelCase_: Optional[Any] = intermediate_size
UpperCAmelCase_: Optional[Any] = hidden_act
UpperCAmelCase_: List[Any] = hidden_dropout_prob
UpperCAmelCase_: str = attention_probs_dropout_prob
UpperCAmelCase_: Tuple = max_position_embeddings
UpperCAmelCase_: List[Any] = type_vocab_size
UpperCAmelCase_: List[Any] = type_sequence_label_size
UpperCAmelCase_: Union[str, Any] = initializer_range
UpperCAmelCase_: Any = num_labels
UpperCAmelCase_: Tuple = num_choices
UpperCAmelCase_: str = relative_attention
UpperCAmelCase_: Optional[Any] = position_biased_input
UpperCAmelCase_: Any = pos_att_type
UpperCAmelCase_: Any = scope
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_: str = None
if self.use_input_mask:
UpperCAmelCase_: List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_: List[str] = None
if self.use_token_type_ids:
UpperCAmelCase_: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_: int = None
UpperCAmelCase_: Dict = None
UpperCAmelCase_: Dict = None
if self.use_labels:
UpperCAmelCase_: Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_: Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_: Union[str, Any] = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=A__ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case_ ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ):
"""simple docstring"""
UpperCAmelCase_: int = TFDebertaVaModel(config=A__ )
UpperCAmelCase_: Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
UpperCAmelCase_: Dict = [input_ids, input_mask]
UpperCAmelCase_: Union[str, Any] = model(A__ )
UpperCAmelCase_: Optional[int] = model(A__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_ ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ):
"""simple docstring"""
UpperCAmelCase_: Optional[Any] = TFDebertaVaForMaskedLM(config=A__ )
UpperCAmelCase_: Any = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase_: List[Any] = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case_ ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ):
"""simple docstring"""
UpperCAmelCase_: Dict = self.num_labels
UpperCAmelCase_: Optional[Any] = TFDebertaVaForSequenceClassification(config=A__ )
UpperCAmelCase_: Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase_: Any = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case_ ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ):
"""simple docstring"""
UpperCAmelCase_: Optional[Any] = self.num_labels
UpperCAmelCase_: Any = TFDebertaVaForTokenClassification(config=A__ )
UpperCAmelCase_: int = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase_: Optional[int] = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case_ ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ):
"""simple docstring"""
UpperCAmelCase_: List[str] = TFDebertaVaForQuestionAnswering(config=A__ )
UpperCAmelCase_: Any = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase_: Any = model(A__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Optional[Any] = self.prepare_config_and_inputs()
(UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_) = config_and_inputs
UpperCAmelCase_: List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( snake_case__ , snake_case__ , unittest.TestCase ):
snake_case_ = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case_ = (
{
'''feature-extraction''': TFDebertaVaModel,
'''fill-mask''': TFDebertaVaForMaskedLM,
'''question-answering''': TFDebertaVaForQuestionAnswering,
'''text-classification''': TFDebertaVaForSequenceClassification,
'''token-classification''': TFDebertaVaForTokenClassification,
'''zero-shot''': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case_ = False
snake_case_ = False
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Union[str, Any] = TFDebertaVaModelTester(self )
UpperCAmelCase_: str = ConfigTester(self , config_class=A__ , hidden_size=37 )
def snake_case_ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__ )
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A__ )
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A__ )
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A__ )
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A__ )
@slow
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: List[str] = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" )
self.assertIsNotNone(A__ )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
@unittest.skip(reason="Model not available yet" )
def snake_case_ ( self ):
"""simple docstring"""
pass
@slow
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Tuple = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" )
UpperCAmelCase_: List[Any] = tf.constant([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
UpperCAmelCase_: Tuple = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
UpperCAmelCase_: Tuple = model(A__ , attention_mask=A__ )[0]
UpperCAmelCase_: Dict = tf.constant(
[[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , A__ , atol=1E-4 )
| 137
| 0
|
from __future__ import annotations
import requests
_snake_case = set(
"approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports".split()
)
def lowerCamelCase_ ( A : str , A : int = 1 , A : str = "new" , A : list | None = None ):
"""simple docstring"""
lowerCAmelCase_ = wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(A ) - valid_terms ) ):
lowerCAmelCase_ = F'Invalid search term: {invalid_search_terms}'
raise ValueError(lowerCAmelCase_ )
lowerCAmelCase_ = requests.get(
F'https://reddit.com/r/{subreddit}/{age}.json?limit={limit}' , headers={'''User-agent''': '''A random string'''} , )
if response.status_code == 4_29:
raise requests.HTTPError
lowerCAmelCase_ = response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(A )}
lowerCAmelCase_ = {}
for id_ in range(A ):
lowerCAmelCase_ = {
item: data['''data''']['''children'''][id_]['''data'''][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited. Try again after some time.
print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
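# Hedged illustration of the validation idiom used above: the walrus assignment
# inside `if` both builds the invalid-term string and gates the error. The names
# and values below are local to this sketch.
_valid_terms = {"title", "url", "selftext"}
_requested = ["title", "upvotes"]
if _invalid := ", ".join(sorted(set(_requested) - _valid_terms)):
    print(f"would raise ValueError: Invalid search term: {_invalid}")  # -> upvotes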
| 413
|
from math import pi
def lowerCamelCase_ ( A : int , A : int ):
"""simple docstring"""
return 2 * pi * radius * (angle / 3_60)
if __name__ == "__main__":
print(arc_length(90, 10))
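# Self-contained sanity check of the formula above (2 * pi * r * angle / 360):
# a 90-degree arc of a radius-10 circle is a quarter of the circumference.
from math import pi

def _arc_length(angle: float, radius: float) -> float:  # local sketch helper
    return 2 * pi * radius * (angle / 360)

assert abs(_arc_length(90, 10) - 2 * pi * 10 / 4) < 1e-9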
| 413
| 1
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class UpperCamelCase_ ( A ):
"""simple docstring"""
UpperCAmelCase__ : torch.FloatTensor
class UpperCamelCase_ ( nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] , _lowerCamelCase : Tuple=3 , _lowerCamelCase : int=3 , _lowerCamelCase : List[str]=("DownEncoderBlock2D",) , _lowerCamelCase : Dict=(64,) , _lowerCamelCase : str=2 , _lowerCamelCase : int=32 , _lowerCamelCase : Union[str, Any]="silu" , _lowerCamelCase : List[Any]=True , ) -> List[str]:
super().__init__()
__magic_name__ = layers_per_block
__magic_name__ = torch.nn.Convad(
_lowerCamelCase , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
__magic_name__ = None
__magic_name__ = nn.ModuleList([] )
# down
__magic_name__ = block_out_channels[0]
for i, down_block_type in enumerate(_lowerCamelCase ):
__magic_name__ = output_channel
__magic_name__ = block_out_channels[i]
__magic_name__ = i == len(_lowerCamelCase ) - 1
__magic_name__ = get_down_block(
_lowerCamelCase , num_layers=self.layers_per_block , in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=_lowerCamelCase , resnet_groups=_lowerCamelCase , attention_head_dim=_lowerCamelCase , temb_channels=_lowerCamelCase , )
self.down_blocks.append(_lowerCamelCase )
# mid
__magic_name__ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=_lowerCamelCase , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=_lowerCamelCase , temb_channels=_lowerCamelCase , )
# out
__magic_name__ = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=_lowerCamelCase , eps=1e-6 )
__magic_name__ = nn.SiLU()
__magic_name__ = 2 * out_channels if double_z else out_channels
__magic_name__ = nn.Convad(block_out_channels[-1] , _lowerCamelCase , 3 , padding=1 )
__magic_name__ = False
def __A ( self : Tuple , _lowerCamelCase : Any ) -> Optional[int]:
__magic_name__ = x
__magic_name__ = self.conv_in(_lowerCamelCase )
if self.training and self.gradient_checkpointing:
def create_custom_forward(_lowerCamelCase : Any ):
def custom_forward(*_lowerCamelCase : str ):
return module(*_lowerCamelCase )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
__magic_name__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(_lowerCamelCase ) , _lowerCamelCase , use_reentrant=_lowerCamelCase )
# middle
__magic_name__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , _lowerCamelCase , use_reentrant=_lowerCamelCase )
else:
for down_block in self.down_blocks:
__magic_name__ = torch.utils.checkpoint.checkpoint(create_custom_forward(_lowerCamelCase ) , _lowerCamelCase )
# middle
__magic_name__ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , _lowerCamelCase )
else:
# down
for down_block in self.down_blocks:
__magic_name__ = down_block(_lowerCamelCase )
# middle
__magic_name__ = self.mid_block(_lowerCamelCase )
# post-process
__magic_name__ = self.conv_norm_out(_lowerCamelCase )
__magic_name__ = self.conv_act(_lowerCamelCase )
__magic_name__ = self.conv_out(_lowerCamelCase )
return sample
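# Hedged sketch of the checkpointing idiom used in the forward pass above:
# torch.utils.checkpoint re-runs the wrapped module during backward instead of
# caching activations, trading compute for memory. The tiny Linear block below is
# an illustrative stand-in for a down/mid block.
import torch
import torch.nn as nn

def _create_custom_forward(module):
    def _custom_forward(*inputs):
        return module(*inputs)
    return _custom_forward

_block = nn.Linear(8, 8)
_x = torch.randn(2, 8, requires_grad=True)
_y = torch.utils.checkpoint.checkpoint(_create_custom_forward(_block), _x, use_reentrant=False)
_y.sum().backward()  # gradients flow; activations were recomputed, not stored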
class UpperCamelCase_ ( nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] , _lowerCamelCase : str=3 , _lowerCamelCase : int=3 , _lowerCamelCase : List[str]=("UpDecoderBlock2D",) , _lowerCamelCase : str=(64,) , _lowerCamelCase : Optional[Any]=2 , _lowerCamelCase : int=32 , _lowerCamelCase : Any="silu" , _lowerCamelCase : int="group" , ) -> Dict:
super().__init__()
__magic_name__ = layers_per_block
__magic_name__ = nn.Convad(
_lowerCamelCase , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
__magic_name__ = None
__magic_name__ = nn.ModuleList([] )
__magic_name__ = in_channels if norm_type == "spatial" else None
# mid
__magic_name__ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=_lowerCamelCase , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=_lowerCamelCase , temb_channels=_lowerCamelCase , )
# up
__magic_name__ = list(reversed(_lowerCamelCase ) )
__magic_name__ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(_lowerCamelCase ):
__magic_name__ = output_channel
__magic_name__ = reversed_block_out_channels[i]
__magic_name__ = i == len(_lowerCamelCase ) - 1
__magic_name__ = get_up_block(
_lowerCamelCase , num_layers=self.layers_per_block + 1 , in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , prev_output_channel=_lowerCamelCase , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=_lowerCamelCase , resnet_groups=_lowerCamelCase , attention_head_dim=_lowerCamelCase , temb_channels=_lowerCamelCase , resnet_time_scale_shift=_lowerCamelCase , )
self.up_blocks.append(_lowerCamelCase )
__magic_name__ = output_channel
# out
if norm_type == "spatial":
__magic_name__ = SpatialNorm(block_out_channels[0] , _lowerCamelCase )
else:
__magic_name__ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=_lowerCamelCase , eps=1e-6 )
__magic_name__ = nn.SiLU()
__magic_name__ = nn.Convad(block_out_channels[0] , _lowerCamelCase , 3 , padding=1 )
__magic_name__ = False
def __A ( self : str , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple=None ) -> Dict:
__magic_name__ = z
__magic_name__ = self.conv_in(_lowerCamelCase )
__magic_name__ = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(_lowerCamelCase : List[str] ):
def custom_forward(*_lowerCamelCase : str ):
return module(*_lowerCamelCase )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
__magic_name__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , _lowerCamelCase , _lowerCamelCase , use_reentrant=_lowerCamelCase )
__magic_name__ = sample.to(_lowerCamelCase )
# up
for up_block in self.up_blocks:
__magic_name__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(_lowerCamelCase ) , _lowerCamelCase , _lowerCamelCase , use_reentrant=_lowerCamelCase )
else:
# middle
__magic_name__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , _lowerCamelCase , _lowerCamelCase )
__magic_name__ = sample.to(_lowerCamelCase )
# up
for up_block in self.up_blocks:
__magic_name__ = torch.utils.checkpoint.checkpoint(create_custom_forward(_lowerCamelCase ) , _lowerCamelCase , _lowerCamelCase )
else:
# middle
__magic_name__ = self.mid_block(_lowerCamelCase , _lowerCamelCase )
__magic_name__ = sample.to(_lowerCamelCase )
# up
for up_block in self.up_blocks:
__magic_name__ = up_block(_lowerCamelCase , _lowerCamelCase )
# post-process
if latent_embeds is None:
__magic_name__ = self.conv_norm_out(_lowerCamelCase )
else:
__magic_name__ = self.conv_norm_out(_lowerCamelCase , _lowerCamelCase )
__magic_name__ = self.conv_act(_lowerCamelCase )
__magic_name__ = self.conv_out(_lowerCamelCase )
return sample
class UpperCamelCase_ ( nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict , _lowerCamelCase : str , _lowerCamelCase : Any=None , _lowerCamelCase : Tuple="random" , _lowerCamelCase : int=False , _lowerCamelCase : int=True ) -> List[str]:
super().__init__()
__magic_name__ = n_e
__magic_name__ = vq_embed_dim
__magic_name__ = beta
__magic_name__ = legacy
__magic_name__ = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
__magic_name__ = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
__magic_name__ = self.used.shape[0]
__magic_name__ = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
__magic_name__ = self.re_embed
__magic_name__ = self.re_embed + 1
print(
f'Remapping {self.n_e} indices to {self.re_embed} indices. '
f'Using {self.unknown_index} for unknown indices.' )
else:
__magic_name__ = n_e
__magic_name__ = sane_index_shape
def __A ( self : Optional[Any] , _lowerCamelCase : Union[str, Any] ) -> Any:
__magic_name__ = inds.shape
assert len(_lowerCamelCase ) > 1
__magic_name__ = inds.reshape(ishape[0] , -1 )
__magic_name__ = self.used.to(_lowerCamelCase )
__magic_name__ = (inds[:, :, None] == used[None, None, ...]).long()
__magic_name__ = match.argmax(-1 )
__magic_name__ = match.sum(2 ) < 1
if self.unknown_index == "random":
__magic_name__ = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
__magic_name__ = self.unknown_index
return new.reshape(_lowerCamelCase )
def __A ( self : Optional[int] , _lowerCamelCase : List[str] ) -> int:
__magic_name__ = inds.shape
assert len(_lowerCamelCase ) > 1
__magic_name__ = inds.reshape(ishape[0] , -1 )
__magic_name__ = self.used.to(_lowerCamelCase )
if self.re_embed > self.used.shape[0]: # extra token
__magic_name__ = 0 # simply set to zero
__magic_name__ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , _lowerCamelCase )
return back.reshape(_lowerCamelCase )
def __A ( self : List[Any] , _lowerCamelCase : str ) -> Optional[Any]:
__magic_name__ = z.permute(0 , 2 , 3 , 1 ).contiguous()
__magic_name__ = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
__magic_name__ = torch.argmin(torch.cdist(_lowerCamelCase , self.embedding.weight ) , dim=1 )
__magic_name__ = self.embedding(_lowerCamelCase ).view(z.shape )
__magic_name__ = None
__magic_name__ = None
# compute loss for embedding
if not self.legacy:
__magic_name__ = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
__magic_name__ = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
__magic_name__ = z + (z_q - z).detach()
# reshape back to match original input shape
__magic_name__ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
__magic_name__ = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
__magic_name__ = self.remap_to_used(_lowerCamelCase )
__magic_name__ = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
__magic_name__ = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __A ( self : Optional[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[Any] ) -> List[Any]:
if self.remap is not None:
__magic_name__ = indices.reshape(shape[0] , -1 ) # add batch axis
__magic_name__ = self.unmap_to_all(_lowerCamelCase )
__magic_name__ = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
__magic_name__ = self.embedding(_lowerCamelCase )
if shape is not None:
__magic_name__ = z_q.view(_lowerCamelCase )
# reshape back to match original input shape
__magic_name__ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
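# Hedged sketch of the nearest-codebook lookup performed in the quantizer above:
# torch.cdist(z_flat, codebook) yields the same Euclidean distances as the expanded
# form ||z||^2 + ||e||^2 - 2 z.e noted in the inline comment, and argmin picks the
# code index. Sizes below are illustrative.
import torch

_codebook = torch.randn(16, 4)  # (n_e, vq_embed_dim)
_z_flat = torch.randn(10, 4)    # flattened latents
_d_cdist = torch.cdist(_z_flat, _codebook)
_d_expanded = (
    _z_flat.pow(2).sum(1, keepdim=True)
    + _codebook.pow(2).sum(1)
    - 2 * _z_flat @ _codebook.t()
).clamp_min(0).sqrt()
assert torch.allclose(_d_cdist, _d_expanded, atol=1e-4)
_indices = torch.argmin(_d_cdist, dim=1)  # nearest code per latent vector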
class UpperCamelCase_ ( A ):
"""simple docstring"""
def __init__( self : str , _lowerCamelCase : Tuple , _lowerCamelCase : Dict=False ) -> List[Any]:
__magic_name__ = parameters
__magic_name__ , __magic_name__ = torch.chunk(_lowerCamelCase , 2 , dim=1 )
__magic_name__ = torch.clamp(self.logvar , -30.0 , 20.0 )
__magic_name__ = deterministic
__magic_name__ = torch.exp(0.5 * self.logvar )
__magic_name__ = torch.exp(self.logvar )
if self.deterministic:
__magic_name__ = __magic_name__ = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def __A ( self : Any , _lowerCamelCase : Optional[torch.Generator] = None ) -> torch.FloatTensor:
__magic_name__ = randn_tensor(
self.mean.shape , generator=_lowerCamelCase , device=self.parameters.device , dtype=self.parameters.dtype )
__magic_name__ = self.mean + self.std * sample
return x
def __A ( self : List[Any] , _lowerCamelCase : List[Any]=None ) -> Any:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def __A ( self : str , _lowerCamelCase : str , _lowerCamelCase : List[Any]=[1, 2, 3] ) -> List[str]:
if self.deterministic:
return torch.Tensor([0.0] )
__magic_name__ = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=_lowerCamelCase )
def __A ( self : Optional[Any] ) -> Tuple:
return self.mean
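# Hedged numeric check of the KL term implemented above for diagonal Gaussians
# against the standard normal: KL = 0.5 * sum(mu^2 + sigma^2 - 1 - log sigma^2),
# which vanishes when mean = 0 and logvar = 0.
import torch

_mean = torch.zeros(1, 2, 2, 2)
_logvar = torch.zeros(1, 2, 2, 2)
_kl = 0.5 * torch.sum(_mean.pow(2) + _logvar.exp() - 1.0 - _logvar, dim=[1, 2, 3])
assert torch.allclose(_kl, torch.zeros(1))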
| 664
|
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Dict , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple=2 , lowerCAmelCase : Any=3 , lowerCAmelCase : Any=4 , lowerCAmelCase : Optional[Any]=2 , lowerCAmelCase : Optional[Any]=7 , lowerCAmelCase : Tuple=True , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : int=True , lowerCAmelCase : Dict=True , lowerCAmelCase : Dict=99 , lowerCAmelCase : Optional[int]=36 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Dict=4 , lowerCAmelCase : int=37 , lowerCAmelCase : Optional[int]="gelu" , lowerCAmelCase : str=0.1 , lowerCAmelCase : str=0.1 , lowerCAmelCase : List[Any]=5_12 , lowerCAmelCase : List[str]=16 , lowerCAmelCase : Union[str, Any]=2 , lowerCAmelCase : Dict=0.02 , lowerCAmelCase : Tuple=6 , lowerCAmelCase : Tuple=6 , lowerCAmelCase : Dict=3 , lowerCAmelCase : List[str]=4 , lowerCAmelCase : List[str]=None , lowerCAmelCase : Optional[Any]=10_00 , ) -> Dict:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = is_training
lowercase__ = use_input_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = coordinate_size
lowercase__ = shape_size
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = scope
lowercase__ = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
lowercase__ = text_seq_length
lowercase__ = (image_size // patch_size) ** 2 + 1
lowercase__ = self.text_seq_length + self.image_seq_length
def UpperCAmelCase ( self : str) -> List[str]:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size)
lowercase__ = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox)
lowercase__ = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowercase__ = bbox[i, j, 3]
lowercase__ = bbox[i, j, 1]
lowercase__ = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
lowercase__ = bbox[i, j, 2]
lowercase__ = bbox[i, j, 0]
lowercase__ = tmp_coordinate
lowercase__ = tf.constant(lowerCAmelCase)
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.text_seq_length])
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size)
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowercase__ = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels)
lowercase__ = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : str , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : Tuple , lowerCAmelCase : List[Any]) -> List[str]:
"""simple docstring"""
lowercase__ = TFLayoutLMvaModel(config=lowerCAmelCase)
# text + image
lowercase__ = model(lowerCAmelCase , pixel_values=lowerCAmelCase , training=lowerCAmelCase)
lowercase__ = model(
lowerCAmelCase , bbox=lowerCAmelCase , pixel_values=lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , training=lowerCAmelCase , )
lowercase__ = model(lowerCAmelCase , bbox=lowerCAmelCase , pixel_values=lowerCAmelCase , training=lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
# text only
lowercase__ = model(lowerCAmelCase , training=lowerCAmelCase)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size))
# image only
lowercase__ = model({'pixel_values': pixel_values} , training=lowerCAmelCase)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size))
def UpperCAmelCase ( self : Any , lowerCAmelCase : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : List[str]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = TFLayoutLMvaForSequenceClassification(config=lowerCAmelCase)
lowercase__ = model(
lowerCAmelCase , bbox=lowerCAmelCase , pixel_values=lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , training=lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : str) -> str:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = TFLayoutLMvaForTokenClassification(config=lowerCAmelCase)
lowercase__ = model(
lowerCAmelCase , bbox=lowerCAmelCase , pixel_values=lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , training=lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels))
def UpperCAmelCase ( self : int , lowerCAmelCase : int , lowerCAmelCase : List[str] , lowerCAmelCase : Dict , lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : List[str] , lowerCAmelCase : List[Any]) -> int:
"""simple docstring"""
lowercase__ = 2
lowercase__ = TFLayoutLMvaForQuestionAnswering(config=lowerCAmelCase)
lowercase__ = model(
lowerCAmelCase , bbox=lowerCAmelCase , pixel_values=lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , training=lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
((lowercase__), (lowercase__), (lowercase__), (lowercase__), (lowercase__), (lowercase__), (lowercase__), (lowercase__)) = config_and_inputs
lowercase__ = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class UpperCAmelCase__( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : Optional[Any] = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
A : List[Any] = (
{"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
if is_tf_available()
else {}
)
A : Any = False
A : Any = False
A : Dict = False
def UpperCAmelCase ( self : int , lowerCAmelCase : int , lowerCAmelCase : Dict , lowerCAmelCase : Any , lowerCAmelCase : List[Any] , lowerCAmelCase : Dict) -> Tuple:
"""simple docstring"""
return True
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str]=False) -> dict:
"""simple docstring"""
lowercase__ = copy.deepcopy(lowerCAmelCase)
if model_class in get_values(lowerCAmelCase):
lowercase__ = {
k: tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
if isinstance(lowerCAmelCase , tf.Tensor) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCAmelCase):
lowercase__ = tf.ones(self.model_tester.batch_size , dtype=tf.intaa)
elif model_class in get_values(lowerCAmelCase):
lowercase__ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa)
lowercase__ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa)
elif model_class in get_values(lowerCAmelCase):
lowercase__ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa)
elif model_class in get_values(lowerCAmelCase):
lowercase__ = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa)
return inputs_dict
def UpperCAmelCase ( self : Optional[int]) -> int:
"""simple docstring"""
lowercase__ = TFLayoutLMvaModelTester(self)
lowercase__ = ConfigTester(self , config_class=lowerCAmelCase , hidden_size=37)
def UpperCAmelCase ( self : Optional[Any]) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : List[Any]) -> List[str]:
"""simple docstring"""
lowercase__, lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(lowerCAmelCase)
if getattr(lowerCAmelCase , 'hf_compute_loss' , lowerCAmelCase):
# The number of elements in the loss should be the same as the number of elements in the label
lowercase__ = self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase , return_labels=lowerCAmelCase)
lowercase__ = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=lowerCAmelCase)[0]
]
lowercase__ = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
lowercase__ = self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase , return_labels=lowerCAmelCase)
lowercase__ = prepared_for_class.pop('input_ids')
lowercase__ = model(lowerCAmelCase , **lowerCAmelCase)[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
# Test that model correctly compute the loss when we mask some positions
lowercase__ = self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase , return_labels=lowerCAmelCase)
lowercase__ = prepared_for_class.pop('input_ids')
if "labels" in prepared_for_class:
lowercase__ = prepared_for_class['labels'].numpy()
if len(labels.shape) > 1 and labels.shape[1] != 1:
lowercase__ = -1_00
lowercase__ = tf.convert_to_tensor(lowerCAmelCase)
lowercase__ = model(lowerCAmelCase , **lowerCAmelCase)[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
self.assertTrue(not np.any(np.isnan(loss.numpy())))
# Test that model correctly compute the loss with a dict
lowercase__ = self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase , return_labels=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
# Test that model correctly compute the loss with a tuple
lowercase__ = self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase , return_labels=lowerCAmelCase)
# Get keys that were added with the _prepare_for_class function
lowercase__ = prepared_for_class.keys() - inputs_dict.keys()
lowercase__ = inspect.signature(model.call).parameters
lowercase__ = list(signature.keys())
# Create a dictionary holding the location of the tensors in the tuple
lowercase__ = {0: 'input_ids'}
for label_key in label_keys:
lowercase__ = signature_names.index(lowerCAmelCase)
lowercase__ = label_key
lowercase__ = sorted(tuple_index_mapping.items())
# Initialize a list with their default values, update the values and convert to a tuple
lowercase__ = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default)
for index, value in sorted_tuple_index_mapping:
lowercase__ = prepared_for_class[value]
lowercase__ = tuple(lowerCAmelCase)
# Send to model
lowercase__ = model(tuple_input[:-1])[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
def UpperCAmelCase ( self : str) -> Any:
"""simple docstring"""
(lowercase__, lowercase__, lowercase__, lowercase__, lowercase__, lowercase__, lowercase__, lowercase__) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> int:
"""simple docstring"""
(lowercase__, lowercase__, lowercase__, lowercase__, lowercase__, lowercase__, lowercase__, lowercase__) = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase__ = type
self.model_tester.create_and_check_model(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
(lowercase__, lowercase__, lowercase__, lowercase__, lowercase__, lowercase__, lowercase__, lowercase__) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> int:
"""simple docstring"""
(lowercase__, lowercase__, lowercase__, lowercase__, lowercase__, lowercase__, lowercase__, lowercase__) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
def UpperCAmelCase ( self : int) -> Dict:
"""simple docstring"""
(lowercase__, lowercase__, lowercase__, lowercase__, lowercase__, lowercase__, lowercase__, lowercase__) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
@slow
def UpperCAmelCase ( self : List[Any]) -> Tuple:
"""simple docstring"""
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = TFLayoutLMvaModel.from_pretrained(lowerCAmelCase)
self.assertIsNotNone(lowerCAmelCase)
def _lowerCAmelCase ( ):
lowercase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
class UpperCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase ( self : Union[str, Any]) -> Tuple:
"""simple docstring"""
return LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase) if is_vision_available() else None
@slow
def UpperCAmelCase ( self : str) -> str:
"""simple docstring"""
lowercase__ = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base')
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=lowerCAmelCase , return_tensors='tf').pixel_values
lowercase__ = tf.constant([[1, 2]])
lowercase__ = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]) , axis=0)
# forward pass
lowercase__ = model(input_ids=lowerCAmelCase , bbox=lowerCAmelCase , pixel_values=lowerCAmelCase , training=lowerCAmelCase)
# verify the logits
lowercase__ = (1, 1_99, 7_68)
self.assertEqual(outputs.last_hidden_state.shape , lowerCAmelCase)
lowercase__ = tf.constant(
[[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]])
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase , atol=1E-4))
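# Hedged sketch of the bbox "legality" fix that the model tester above performs with
# nested Python loops (ensuring x0 <= x1 and y0 <= y1 per box). A vectorized NumPy
# equivalent, illustrative only and not the implementation used by the tests:
import numpy as np

_bbox = np.random.randint(0, 1000, size=(2, 5, 4))
_x = np.sort(_bbox[..., [0, 2]], axis=-1)  # sorted (x0, x1)
_y = np.sort(_bbox[..., [1, 3]], axis=-1)  # sorted (y0, y1)
_legal = np.stack([_x[..., 0], _y[..., 0], _x[..., 1], _y[..., 1]], axis=-1)
assert (_legal[..., 2] >= _legal[..., 0]).all() and (_legal[..., 3] >= _legal[..., 1]).all()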
| 622
| 0
|
def __lowerCamelCase ( __lowerCAmelCase : str , __lowerCAmelCase : str = " " ) -> list:
__UpperCamelCase : str = []
__UpperCamelCase : str = 0
for index, char in enumerate(__lowerCAmelCase ):
if char == separator:
split_words.append(string[last_index:index] )
__UpperCamelCase : Any = index + 1
elif index + 1 == len(__lowerCAmelCase ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
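# Usage sketch for the manual split above (self-contained restatement; the helper
# name is local to this sketch). One behavioural difference from str.split: a
# trailing separator does not produce a final empty field here.
def _split(string: str, separator: str = " ") -> list:
    words, last = [], 0
    for index, char in enumerate(string):
        if char == separator:
            words.append(string[last:index])
            last = index + 1
        elif index + 1 == len(string):
            words.append(string[last : index + 1])
    return words

assert _split("a b c") == ["a", "b", "c"]
assert _split("ab ") == ["ab"]  # "ab ".split(" ") would give ["ab", ""]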
| 515
|
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
UpperCamelCase = logging.get_logger(__name__)
@add_end_docstrings(UpperCAmelCase_ )
class _A ( UpperCAmelCase_ ):
def __init__( self : Tuple , *lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : List[Any] ):
"""simple docstring"""
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
requires_backends(self , """decord""" )
self.check_model_type(lowerCamelCase__ )
def a ( self : Tuple , lowerCamelCase__ : Optional[int]=None , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : Tuple=None ):
"""simple docstring"""
__UpperCamelCase : Tuple = {}
if frame_sampling_rate is not None:
__UpperCamelCase : Dict = frame_sampling_rate
if num_frames is not None:
__UpperCamelCase : Optional[int] = num_frames
__UpperCamelCase : Dict = {}
if top_k is not None:
__UpperCamelCase : Dict = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : Any , lowerCamelCase__ : Union[str, List[str]] , **lowerCamelCase__ : Any ):
"""simple docstring"""
return super().__call__(lowerCamelCase__ , **lowerCamelCase__ )
def a ( self : int , lowerCamelCase__ : str , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : Tuple=1 ):
"""simple docstring"""
if num_frames is None:
__UpperCamelCase : Union[str, Any] = self.model.config.num_frames
if video.startswith("""http://""" ) or video.startswith("""https://""" ):
__UpperCamelCase : Tuple = BytesIO(requests.get(lowerCamelCase__ ).content )
__UpperCamelCase : Dict = VideoReader(lowerCamelCase__ )
videoreader.seek(0 )
__UpperCamelCase : Optional[int] = 0
__UpperCamelCase : Any = num_frames * frame_sampling_rate - 1
__UpperCamelCase : Dict = np.linspace(lowerCamelCase__ , lowerCamelCase__ , num=lowerCamelCase__ , dtype=np.intaa )
__UpperCamelCase : Any = videoreader.get_batch(lowerCamelCase__ ).asnumpy()
__UpperCamelCase : Dict = list(lowerCamelCase__ )
__UpperCamelCase : List[str] = self.image_processor(lowerCamelCase__ , return_tensors=self.framework )
return model_inputs
def a ( self : Dict , lowerCamelCase__ : Optional[Any] ):
"""simple docstring"""
__UpperCamelCase : str = self.model(**lowerCamelCase__ )
return model_outputs
def a ( self : Tuple , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : int=5 ):
"""simple docstring"""
if top_k > self.model.config.num_labels:
__UpperCamelCase : Optional[Any] = self.model.config.num_labels
if self.framework == "pt":
__UpperCamelCase : str = model_outputs.logits.softmax(-1 )[0]
__UpperCamelCase , __UpperCamelCase : Any = probs.topk(lowerCamelCase__ )
else:
raise ValueError(f'Unsupported framework: {self.framework}' )
__UpperCamelCase : List[Any] = scores.tolist()
__UpperCamelCase : Optional[Any] = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowerCamelCase__ , lowerCamelCase__ )]
| 515
| 1
|
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ : str = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : List[str] = XGLMTokenizer
__lowercase : Union[str, Any] = XGLMTokenizerFast
__lowercase : Optional[int] = True
__lowercase : List[str] = True
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__snake_case = XGLMTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = '''<pad>'''
__snake_case = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 1008 )
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case = XGLMTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
__snake_case = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__SCREAMING_SNAKE_CASE , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__snake_case = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__snake_case = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__snake_case = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(__SCREAMING_SNAKE_CASE , f.name )
__snake_case = XGLMTokenizer(f.name , keep_accents=__SCREAMING_SNAKE_CASE )
__snake_case = pickle.dumps(__SCREAMING_SNAKE_CASE )
pickle.loads(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__snake_case = self.get_tokenizer()
__snake_case = self.get_rust_tokenizer()
__snake_case = '''I was born in 92000, and this is falsé.'''
__snake_case = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = self.get_rust_tokenizer()
__snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@slow
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = '''Hello World!'''
__snake_case = [2, 3_1227, 4447, 35]
self.assertListEqual(__SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(__SCREAMING_SNAKE_CASE ) )
@slow
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'''
)
# fmt: off
__snake_case = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 7_1630, 2_8085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 1_3675, 377, 652, 7580, 1_0341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 20_2277, 1_7892, 33, 60, 87, 4, 3234, 157, 61, 2667, 5_2376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(__SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(__SCREAMING_SNAKE_CASE ) )
@slow
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
# fmt: off
__snake_case = {
'''input_ids''': [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]],
'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''facebook/xglm-564M''' , padding=__SCREAMING_SNAKE_CASE , )
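# Hedged sketch of the id-offset pattern exercised by the tests above: raw
# SentencePiece ids are shifted by tokenizer.fairseq_offset so that low ids stay
# reserved for special tokens. The offset value below is an assumption for
# illustration only.
_sp_ids = [285, 46, 10, 170, 382]
_fairseq_offset = 1  # assumed value, for the sketch only
_model_ids = [i + _fairseq_offset for i in _sp_ids]
assert _model_ids == [286, 47, 11, 171, 383]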
| 24
|
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
__lowerCamelCase = Lock()
def a__ ( UpperCamelCase_ : str, UpperCamelCase_ : Any, UpperCamelCase_ : Any, UpperCamelCase_ : int, UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[str], UpperCamelCase_ : Optional[int] ):
global process_lock
# we perform n passes, since after n passes an n-element list is sorted; the
# hard-coded 10 matches the length of the demo list built in main() below.
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0, 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(UpperCamelCase_ )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
UpperCAmelCase__ :Union[str, Any] = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
UpperCAmelCase__ :List[Any] = min(UpperCamelCase_, UpperCamelCase_ )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(UpperCamelCase_ )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
UpperCAmelCase__ :List[str] = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
UpperCAmelCase__ :List[Any] = max(UpperCamelCase_, UpperCamelCase_ )
# after all swaps are performed, send the values back to main
result_pipe[1].send(UpperCamelCase_ )
def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process, args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]), ) )
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process, args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]), ) )
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process, args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ), ) )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
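# For reference, a minimal single-process sketch of the same odd-even
# transposition idea (an illustrative helper, not part of the module above):
# pass k compares and swaps the even- or odd-indexed neighbor pairs, and
# len(arr) passes guarantee a sorted result.
def odd_even_transposition_sequential(arr: list) -> list:
    for pass_number in range(len(arr)):
        for i in range(pass_number % 2, len(arr) - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
# e.g. odd_even_transposition_sequential([3, 1, 2]) == [1, 2, 3]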
| 467
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
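# Why the lazy-module pattern above matters (a usage sketch; assumed typical
# behaviour of `_LazyModule`, not part of this file): importing the package
# stays cheap because torch-backed submodules are only imported when one of
# their attributes is first accessed through the proxy installed in sys.modules.
#
#     import transformers  # fast: no modeling code loaded yet
#     cls = transformers.SqueezeBertModel  # first access triggers the real import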
| 45
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""moussaKam/mbarthez""": 1_024,
"""moussaKam/barthez""": 1_024,
"""moussaKam/barthez-orangesum-title""": 1_024,
}
lowercase_ = """▁"""
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
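# A small illustration (token strings shown schematically) of the layouts the
# two methods above produce, following the RoBERTa/CamemBERT convention:
#
#     single sequence:   <s> A </s>
#     pair of sequences: <s> A </s> </s> B </s>
#
# create_token_type_ids_from_sequences returns all zeros in both cases, since
# BARThez does not use token type ids.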
| 45
| 1
|
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None


class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs
    @staticmethod
    def _map_devices_to_str() -> dict:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column
    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)
    def format_row(self, pa_table: "pa.Table") -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: "pa.Table") -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: "pa.Table") -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
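# A minimal usage sketch (assumed `datasets` API surface, shown for context):
# selecting the "jax" format routes rows through the formatter above, so
# __getitem__ returns jax.Array values instead of Python lists.
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("jax")
#     ds[0]["x"]  # -> jax.Array of dtype float32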
| 42
|
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    # (copy the pretrained query weights into the three extra entity-aware projections)
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index
    return entity_vocab
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
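# Example invocation (script and file names are placeholders):
#
#     python convert_luke_checkpoint.py \
#         --checkpoint_path luke.bin \
#         --metadata_path metadata.json \
#         --entity_vocab_path entity_vocab.tsv \
#         --pytorch_dump_folder_path ./luke-base \
#         --model_size base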
| 86
| 0
|
import unittest
import numpy as np
def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray | None = None,
) -> np.ndarray:
    # S = C - B^T @ A^{-1} @ B, the Schur complement of the block A in [[A, B], [B^T, C]]
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        raise ValueError(
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
    if shape_b[1] != shape_c[1]:
        raise ValueError(
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError("Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self):
        # A has 2 rows while B has 3, so the row check must raise
        a = np.array([[1, 2, 1], [2, 1, 2]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self):
        # B has 2 columns while C has 3, so the column check must raise
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
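# The identity exercised by test_schur_complement above: for the block matrix
# X = [[A, B], [B.T, C]] with invertible A and Schur complement
# S = C - B.T @ A^{-1} @ B, the determinant factors as det(X) = det(A) * det(S),
# which is exactly what the test compares numerically.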
| 184
|
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0
class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # `to_kwargs` only returns the fields whose values differ from the defaults
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
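# A short usage sketch (assumed typical Accelerate setup, separate from the
# self-test above): kwargs handlers are the supported way to tune the objects
# Accelerator builds internally, here the DistributedDataParallel wrapper.
#
#     ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
#     accelerator = Accelerator(kwargs_handlers=[ddp_kwargs])
#     model = accelerator.prepare(model)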
| 184
| 1
|
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph) -> bool:
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
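# An equivalent iterative check (a sketch using BFS 2-coloring, which avoids
# deep recursion on long paths; works with the adjacency dict defined above):
from collections import deque


def check_bipartite_bfs(graph) -> bool:
    color = {node: -1 for node in graph}
    for start in graph:
        if color[start] != -1:
            continue
        color[start] = 0
        queue = deque([start])
        while queue:
            v = queue.popleft()
            for u in graph[v]:
                if color[u] == -1:
                    color[u] = 1 - color[v]
                    queue.append(u)
                elif color[u] == color[v]:
                    return False
    return True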
| 460
|
'''simple docstring'''
from __future__ import annotations
solution = []
def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    # check the row and column already placed, then both upper diagonals
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution))
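# Note: the backtracking above enumerates every placement; for n = 8 it prints
# all 92 distinct solutions. The work grows roughly factorially, since row k
# has at most n - k columns left that survive the is_safe checks.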
| 460
| 1
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
__a = logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)
    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        # If we have a list of dicts, convert it into a dict of lists
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, truncation=truncation, )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i], max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, )
            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)
    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference))
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value)
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0))
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value)
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features
    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features
    def _get_padding_strategies(self, padding=False, max_length=None):
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"""Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"""
""" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.""" )
return padding_strategy
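# A minimal sketch (hypothetical feature values, assuming a concrete subclass
# such as a speech feature extractor with feature_size == 1) of how `pad` is
# typically driven:
#
#     features = [{"input_values": [0.1, 0.2, 0.3]}, {"input_values": [0.4]}]
#     batch = extractor.pad(features, padding="longest", return_tensors="np")
#     batch["input_values"].shape  # -> (2, 3); the short example is padded
#     batch["attention_mask"][1]   # -> [1, 0, 0]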
| 719
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/nllb-moe-54B""": """https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json""",
}
class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs, )
| 627
| 0
|
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' ,[False, True] )
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
'''features''' ,[
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] ,)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize('''split''' ,[None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' ,[str, list] )
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' ,[False, True] )
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
'''features''' ,[
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] ,)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize('''split''' ,[None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''' ,[
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] ,)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
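# A compact round-trip sketch (assumed `datasets` usage mirroring the tests
# above, with a placeholder path):
#
#     from datasets import Dataset
#     from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter
#
#     ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
#     ParquetDatasetWriter(ds, "data.parquet").write()
#     reloaded = ParquetDatasetReader("data.parquet").read()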
| 344
|
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
UpperCamelCase = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        # run only the layer at `current_layer`, so the caller can exit early
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
        hidden_states = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0

        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask)

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask)

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
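# Illustration only (not part of the original file): the loss loop above is a
# layer-index-weighted average, so deeper (more reliable) classifiers weigh more.
# With hypothetical per-layer losses [0.9, 0.6, 0.3]:
#   total = (1 * 0.9 + 2 * 0.6 + 3 * 0.3) / (1 + 2 + 3) = 3.0 / 6 = 0.5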
| 269
| 0
|
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
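# Usage sketch (illustrative; assumes the accelerate state was initialized first):
#
#   from accelerate import Accelerator
#   accelerator = Accelerator()
#   logger = get_logger(__name__, log_level="INFO")
#   logger.info("printed once", main_process_only=True)
#   logger.info("printed by every rank, in order", main_process_only=False, in_order=True)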
| 701
|
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node
def __len__( self : Optional[int] ) -> int:
return sum(1 for _ in self )
    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])
def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))
if __name__ == "__main__":
import doctest
doctest.testmod()
    SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
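    # Expected behaviour (a quick check of the classes above): merging the two sample
    # tuples yields one ascending list, printed as
    #   -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10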
| 249
| 0
|
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class __lowercase ( unittest.TestCase ):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
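# Minimal sketch (illustrative, not part of the test file): a stopping criterion is just
# a callable over (input_ids, scores) that returns True once generation should halt, e.g.:
#
#   class StopAtLength:                 # hypothetical, mirrors MaxLengthCriteria
#       def __init__(self, max_length): self.max_length = max_length
#       def __call__(self, input_ids, scores): return input_ids.shape[-1] >= self.max_length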
| 539
|
'''simple docstring'''
class matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
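# Usage sketch (illustrative grid): with 8-way connectivity the diagonal 1s below all
# touch, so the whole diagonal counts as a single island and this prints 1.
if __name__ == "__main__":
    demo_graph = [
        [1, 0, 0],
        [0, 1, 0],
        [0, 0, 1],
    ]
    print(matrix(3, 3, demo_graph).count_islands())  # -> 1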
| 539
| 1
|
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
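# Usage sketch (illustrative; the checkpoint name and inputs are assumptions):
#
#   from PIL import Image
#   processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#   inputs = processor(images=Image.open("cat.png"), text="a photo of", return_tensors="pt")
#   # -> BatchEncoding carrying input_ids, attention_mask and pixel_values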
| 720
|
'''simple docstring'''
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
| 517
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 313
|
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im, [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]], value=self.pad_value
            )
            for size, im in zip(image_sizes, images)
        ]

        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
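# Worked example (illustrative): _scale_box multiplies the x coordinates (columns 0, 2)
# by scale_yx[:, 1] and the y coordinates (columns 1, 3) by scale_yx[:, 0]. With
#   boxes = [[10., 20., 30., 40.]] and scale_yx = [[2.0, 0.5]]   (y-scale 2.0, x-scale 0.5)
# the result is [[5., 40., 15., 80.]].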
| 313
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}


class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 325
|
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A ( __UpperCAmelCase , unittest.TestCase ):
__snake_case = GPTSanJapaneseTokenizer
__snake_case = False
__snake_case = {'do_clean_text': False, 'add_prefix_space': False}
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
super().setUp()
# fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
# fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))
def SCREAMING_SNAKE_CASE__ ( self, **UpperCamelCase__ ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **UpperCamelCase__ )
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.get_tokenizer()
# Testing tokenization
lowerCAmelCase_ = '''こんにちは、世界。 こんばんは、㔺界。'''
lowerCAmelCase_ = ['''こん''', '''にちは''', '''、''', '''世界''', '''。''', '''<SP>''', '''こん''', '''ばんは''', '''、''', '''㔺界''', '''。''']
lowerCAmelCase_ = tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__, UpperCamelCase__ )
# Testing conversion to ids without special tokens
lowerCAmelCase_ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
lowerCAmelCase_ = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__, UpperCamelCase__ )
# Testing conversion to ids with special tokens
lowerCAmelCase_ = tokens + [tokenizer.unk_token]
lowerCAmelCase_ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
lowerCAmelCase_ = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__, UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.get_tokenizer()
# Testing tokenization
lowerCAmelCase_ = '''こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'''
lowerCAmelCase_ = '''こんにちは、、、、世界。こんばんは、、、、世界。'''
lowerCAmelCase_ = tokenizer.encode(UpperCamelCase__ )
lowerCAmelCase_ = tokenizer.decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__, UpperCamelCase__ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
# Testing tokenization
lowerCAmelCase_ = '''こんにちは、世界。'''
lowerCAmelCase_ = '''こんばんは、㔺界。😀'''
lowerCAmelCase_ = '''こんにちは、世界。こんばんは、世界。😀'''
lowerCAmelCase_ = tokenizer.encode(prefix_text + input_text )
lowerCAmelCase_ = tokenizer.encode('''''', prefix_text=prefix_text + input_text )
lowerCAmelCase_ = tokenizer.encode(UpperCamelCase__, prefix_text=UpperCamelCase__ )
lowerCAmelCase_ = tokenizer.decode(UpperCamelCase__ )
lowerCAmelCase_ = tokenizer.decode(UpperCamelCase__ )
lowerCAmelCase_ = tokenizer.decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__, UpperCamelCase__ )
self.assertEqual(UpperCamelCase__, UpperCamelCase__ )
self.assertEqual(UpperCamelCase__, UpperCamelCase__ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
# Testing tokenization
lowerCAmelCase_ = '''こんにちは、世界。'''
lowerCAmelCase_ = '''こんばんは、㔺界。😀'''
lowerCAmelCase_ = len(tokenizer.encode(UpperCamelCase__ ) ) - 2
lowerCAmelCase_ = len(tokenizer.encode(UpperCamelCase__ ) ) - 2
lowerCAmelCase_ = [1] + [0] * (len_prefix + len_text + 1)
lowerCAmelCase_ = [1] * (len_prefix + len_text + 1) + [0]
lowerCAmelCase_ = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
lowerCAmelCase_ = tokenizer(prefix_text + input_text ).token_type_ids
lowerCAmelCase_ = tokenizer('''''', prefix_text=prefix_text + input_text ).token_type_ids
lowerCAmelCase_ = tokenizer(UpperCamelCase__, prefix_text=UpperCamelCase__ ).token_type_ids
self.assertListEqual(UpperCamelCase__, UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__, UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__, UpperCamelCase__ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
lowerCAmelCase_ = tokenizer.encode('''あンいワ''' )
lowerCAmelCase_ = tokenizer.encode('''''', prefix_text='''あンいワ''' )
lowerCAmelCase_ = tokenizer.encode('''いワ''', prefix_text='''あン''' )
self.assertEqual(tokenizer.decode(UpperCamelCase__ ), tokenizer.decode(UpperCamelCase__ ) )
self.assertEqual(tokenizer.decode(UpperCamelCase__ ), tokenizer.decode(UpperCamelCase__ ) )
self.assertNotEqual(UpperCamelCase__, UpperCamelCase__ )
self.assertNotEqual(UpperCamelCase__, UpperCamelCase__ )
self.assertEqual(x_token_a[1], x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1], x_token_a[3] ) # SEG token
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
lowerCAmelCase_ = [['''武田信玄''', '''は、'''], ['''織田信長''', '''の配下の、''']]
lowerCAmelCase_ = tokenizer(UpperCamelCase__, padding=UpperCamelCase__ )
lowerCAmelCase_ = tokenizer.batch_encode_plus(UpperCamelCase__, padding=UpperCamelCase__ )
# fmt: off
lowerCAmelCase_ = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]
lowerCAmelCase_ = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
lowerCAmelCase_ = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids, UpperCamelCase__ )
self.assertListEqual(x_token.token_type_ids, UpperCamelCase__ )
self.assertListEqual(x_token.attention_mask, UpperCamelCase__ )
self.assertListEqual(x_token_a.input_ids, UpperCamelCase__ )
self.assertListEqual(x_token_a.token_type_ids, UpperCamelCase__ )
self.assertListEqual(x_token_a.attention_mask, UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
pass
| 325
| 1
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    image_processor = ViTMAEImageProcessor(size=config.image_size)

    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
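# Illustration only: timm-style MAE checkpoints store Q, K and V fused in one "qkv"
# parameter of shape (3 * dim, dim); convert_state_dict above slices it back into thirds:
#   query = val[:dim, :],  key = val[dim : 2 * dim, :],  value = val[-dim:, :]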
| 65
|
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = parameter_vector[i] - LEARNING_RATE * cost_derivative
        if numpy.allclose(
            parameter_vector, temp_parameter_vector, atol=absolute_error_limit, rtol=relative_error_limit
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
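# Worked example (values from the data above): with the initial parameter_vector
# [2, 4, 1, 5], the hypothesis for the first training input (5, 2, 3) is
#   2 + 4*5 + 1*2 + 5*3 = 39,
# so _error(0) = 39 - 15 = 24 before the first gradient-descent update.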
| 57
| 0
|
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
__snake_case = '''0.12''' # assumed parallelism: 8
@require_flax
@is_staging_test
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def _UpperCAmelCase ( cls ) -> Optional[Any]:
_a = TOKEN
HfFolder.save_token(__A )
@classmethod
def _UpperCAmelCase ( cls ) -> List[Any]:
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def _UpperCAmelCase ( self ) -> Tuple:
_a = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
_a = FlaxBertModel(__A )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
_a = FlaxBertModel.from_pretrained(F'{USER}/test-model-flax' )
_a = flatten_dict(unfreeze(model.params ) )
_a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
_a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__A , 1e-3 , msg=F'{key} not identical' )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__A , repo_id='''test-model-flax''' , push_to_hub=__A , use_auth_token=self._token )
_a = FlaxBertModel.from_pretrained(F'{USER}/test-model-flax' )
_a = flatten_dict(unfreeze(model.params ) )
_a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
_a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__A , 1e-3 , msg=F'{key} not identical' )
def _UpperCAmelCase ( self ) -> str:
_a = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
_a = FlaxBertModel(__A )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
_a = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
_a = flatten_dict(unfreeze(model.params ) )
_a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
_a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__A , 1e-3 , msg=F'{key} not identical' )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
__A , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=__A , use_auth_token=self._token )
_a = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
_a = flatten_dict(unfreeze(model.params ) )
_a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
_a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__A , 1e-3 , msg=F'{key} not identical' )
def check_models_equal(model1, model2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model1.params)
    flat_params_2 = flatten_dict(model2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase ( self ) -> Optional[int]:
_a = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
_a = FlaxBertModel(__A )
_a = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(__A , __A ) )
with self.assertRaises(__A ):
_a = FlaxBertModel.from_pretrained(__A )
_a = FlaxBertModel.from_pretrained(__A , subfolder=__A )
self.assertTrue(check_models_equal(__A , __A ) )
def _UpperCAmelCase ( self ) -> Dict:
_a = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
_a = FlaxBertModel(__A )
_a = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(__A , __A ) , max_shard_size='''10KB''' )
with self.assertRaises(__A ):
_a = FlaxBertModel.from_pretrained(__A )
_a = FlaxBertModel.from_pretrained(__A , subfolder=__A )
self.assertTrue(check_models_equal(__A , __A ) )
def _UpperCAmelCase ( self ) -> str:
_a = '''bert'''
_a = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(__A ):
_a = FlaxBertModel.from_pretrained(__A )
_a = FlaxBertModel.from_pretrained(__A , subfolder=__A )
self.assertIsNotNone(__A )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = '''bert'''
_a = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(__A ):
_a = FlaxBertModel.from_pretrained(__A )
_a = FlaxBertModel.from_pretrained(__A , subfolder=__A )
self.assertIsNotNone(__A )
| 715
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 285
| 0
|
"""simple docstring"""
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def _lowerCamelCase( a=3_2 , a=1_0 , a=1_0_0 , a=1_0_2_6 , a=True , a="data/tokenized_stories_train_wikitext103.jbl" , a="igf_context_pairs.jbl" , ):
set_seed(3 )
# generate train_data and objective_set
__a , __a = generate_datasets(
a , a , number=a , min_len=1_0_2_6 , trim=a )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
__a = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# load pretrained model
__a = load_gpta("gpt2" ).to(a )
print("computing perplexity on objective set" )
__a = compute_perplexity(a , a , a ).item()
print("perplexity on objective set:" , a )
# collect igf pairs and save to file demo.jbl
collect_objective_set(a , a , a , a , a , a , a , a )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def _lowerCamelCase( a , a=1_5 , a=1_2_8 , a=1_0_0 , a="igf_model.pt" , ):
set_seed(4_2 )
# Load pre-trained model
__a = GPTaLMHeadModel.from_pretrained("gpt2" )
# Initialize secondary learner to use embedding weights of model
__a = SecondaryLearner(a )
# Train secondary learner
__a = train_secondary_learner(
a , a , max_epochs=a , batch_size=a , eval_freq=1_0_0 , igf_model_path=a , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def _lowerCamelCase( a , a , a , a=3_2 , a=1_0_0_0 , a=1_6 , a=1.0 , a=recopy_gpta , a=None , a=1_0 , a="gpt2_finetuned.pt" , ):
__a = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
__a = RandomSampler(a )
__a = DataLoader(a , sampler=a )
__a = max_steps // (len(a )) + 1
__a = 0
__a = torch.zeros((1, context_len) , dtype=torch.long , device=a )
__a , __a , __a = recopy_model(a , a , a )
model.train()
if secondary_learner is not None:
secondary_learner.to(a )
secondary_learner.eval()
__a = []
__a = 0
__a = []
__a = []
# Compute the performance of the transformer model at the beginning
__a = compute_perplexity(a , a , a )
test_perps.append(a )
print("Test perplexity, step" , a , ":" , a )
for epoch in range(int(a ) ):
for step, example in enumerate(a ):
torch.cuda.empty_cache()
__a = random.randint(0 , example.size(2 ) - context_len - 1 )
__a = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
__a = model(a , labels=a )
__a = True
if secondary_learner is not None:
__a = secondary_learner.forward(
torch.tensor(a , dtype=torch.long , device=a ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(a ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 1_0:
__a = -1
if predicted_q < threshold:
__a = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
__a = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
__a = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
__a = compute_perplexity(a , a , a )
test_perps.append(a )
print("Test perplexity, step" , a , ":" , a )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 6_0:
break
if max_steps > 0 and global_step > 6_0:
break
# save finetuned transformer model
torch.save(model.state_dict() , a )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def _lowerCamelCase( ):
__a = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task" )
# Required parameters
parser.add_argument(
"--data_dir" , default=a , type=a , required=a , help="The input data dir. Should contain data files for WikiText." , )
parser.add_argument(
"--model_name_or_path" , default=a , type=a , required=a , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--data_file" , type=a , default=a , help=(
"A jbl file containing tokenized data which can be split as objective dataset, "
"train_dataset and test_dataset."
) , )
parser.add_argument(
"--igf_data_file" , type=a , default=a , help="A jbl file containing the context and information gain pairs to train secondary learner." , )
parser.add_argument(
"--output_dir" , default=a , type=a , required=a , help="The output directory where the final fine-tuned model is stored." , )
parser.add_argument(
"--tokenizer_name" , default=a , type=a , help="Pretrained tokenizer name or path if not the same as model_name" , )
parser.add_argument("--seed" , type=a , default=a , help="A seed for reproducible training." )
parser.add_argument(
"--context_len" , default=3_2 , type=a , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--size_objective_set" , default=1_0_0 , type=a , help="number of articles that are long enough to be used as our objective set" , )
parser.add_argument(
"--eval_freq" , default=1_0_0 , type=a , help="secondary model evaluation is triggered at eval_freq" )
parser.add_argument("--max_steps" , default=1_0_0_0 , type=a , help="To calculate training epochs" )
parser.add_argument(
"--secondary_learner_batch_size" , default=1_2_8 , type=a , help="batch size of training data for secondary learner" , )
parser.add_argument(
"--batch_size" , default=1_6 , type=a , help="batch size of training data of language model(gpt2) " )
parser.add_argument(
"--eval_interval" , default=1_0 , type=a , help=(
"decay the selectivity of our secondary learner filter from"
"1 standard deviation above average to 1 below average after 10 batches"
) , )
parser.add_argument(
"--number" , default=1_0_0 , type=a , help="The number of examples split to be used as objective_set/test_data" )
parser.add_argument(
"--min_len" , default=1_0_2_6 , type=a , help="The minimum length of the article to be used as objective set" )
parser.add_argument(
"--secondary_learner_max_epochs" , default=1_5 , type=a , help="number of epochs to train secondary learner" )
parser.add_argument("--trim" , default=a , type=a , help="truncate the example if it exceeds context length" )
parser.add_argument(
"--threshold" , default=1.0 , type=a , help=(
"The threshold value used by secondary learner to filter the train_data and allow only"
" informative data as input to the model"
) , )
parser.add_argument("--finetuned_model_name" , default="gpt2_finetuned.pt" , type=a , help="finetuned_model_name" )
parser.add_argument(
"--recopy_model" , default=a , type=a , help="Reset the model to the original pretrained GPT-2 weights after each iteration" , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=3_2 , max_steps=1_0 , size_objective_set=1_0_0 , min_len=1_0_2_6 , trim=a , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , )
# Load train data for secondary learner
__a = joblib.load("data/IGF_values.jbl" )
# Train secondary learner
__a = training_secondary_learner(
a , secondary_learner_max_epochs=1_5 , secondary_learner_batch_size=1_2_8 , eval_freq=1_0_0 , igf_model_path="igf_model.pt" , )
# load pretrained gpt2 model
__a = GPTaLMHeadModel.from_pretrained("gpt2" )
set_seed(4_2 )
# Generate train and test data to train and evaluate gpt2 model
__a , __a = generate_datasets(
context_len=3_2 , file="data/tokenized_stories_train_wikitext103.jbl" , number=1_0_0 , min_len=1_0_2_6 , trim=a )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
a , a , a , context_len=3_2 , max_steps=1_0_0_0 , batch_size=1_6 , threshold=1.0 , recopy_model=a , secondary_learner=a , eval_interval=1_0 , finetuned_model_name="gpt2_finetuned.pt" , )
if __name__ == "__main__":
main()
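# Sketch of the filtering rule used in finetune() above (illustrative summary, not new
# behaviour): a context is backpropagated only while the secondary learner predicts
# enough information gain.
#   threshold = 1.0 for the first 10 batches, then -1 (i.e. accept everything after);
#   do_backprop is set to False whenever predicted_q < threshold.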
| 528
|
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
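# Minimal sketch of the decorator's contract (an assumption-labelled illustration, not
# the accelerate implementation): halve the batch size on every OOM until the wrapped
# function succeeds, and raise once the size reaches zero.
def find_executable_batch_size_sketch(starting_batch_size=128):  # hypothetical helper
    def decorator(fn):
        def wrapper(*args, **kwargs):
            batch_size = starting_batch_size
            while batch_size > 0:
                try:
                    return fn(batch_size, *args, **kwargs)
                except RuntimeError:  # stand-in for a real CUDA-OOM check
                    batch_size //= 2
            raise RuntimeError("No executable batch size found, reached zero.")
        return wrapper
    return decorator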
| 528
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __A ( __snake_case , __snake_case , unittest.TestCase ):
"""simple docstring"""
A_ = StableDiffusionXLImgaImgPipeline
A_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
A_ = PipelineTesterMixin.required_optional_params - {'latents'}
A_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
A_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def snake_case_( self )-> List[Any]:
torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=True , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=8_0 , cross_attention_dim=6_4 , )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=3_2 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=False )
        text_encoder_a = CLIPTextModelWithProjection(text_encoder_config )
        tokenizer_a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=False )
        components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""text_encoder_2""": text_encoder_a,
"""tokenizer_2""": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
    def get_dummy_inputs( self , device , seed=0 )-> str:
        image = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(seed ) ).to(device )
        image = image / 2 + 0.5
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 5.0,
"""output_type""": """numpy""",
"""strength""": 0.7_5,
}
return inputs
    def test_stable_diffusion_xl_img2img_euler( self )-> Union[str, Any]:
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 3_2, 3_2, 3)
        expected_slice = np.array([0.4_6_5_6, 0.4_8_4_0, 0.4_4_3_9, 0.6_6_9_8, 0.5_5_7_4, 0.4_5_2_4, 0.5_7_9_9, 0.5_9_4_3, 0.5_1_6_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_attention_slicing_forward_pass( self )-> Optional[int]:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
    def test_inference_batch_single_identical( self )-> Tuple:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
    def test_save_load_optional_components( self )-> Dict:
pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds( self )-> List[str]:
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device )
        negative_prompt = 3 * ['''this is a negative prompt''']
        inputs['''negative_prompt'''] = negative_prompt
        inputs['''prompt'''] = 3 * [inputs['''prompt''']]
        output = sd_pipe(**inputs )
        image_slice_a = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device )
        negative_prompt = 3 * ['''this is a negative prompt''']
        prompt = 3 * [inputs.pop('''prompt''' )]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt , negative_prompt=negative_prompt )
        output = sd_pipe(
            **inputs , prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , pooled_prompt_embeds=pooled_prompt_embeds , negative_pooled_prompt_embeds=negative_pooled_prompt_embeds , )
        image_slice_b = output.images[0, -3:, -3:, -1]
        # the two code paths should produce (nearly) identical images
        assert np.abs(image_slice_a.flatten() - image_slice_b.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImgaImgPipelineSlowTests ( unittest.TestCase ):
    """simple docstring"""
    def tearDown( self )-> Union[str, Any]:
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs( self , device , generator_device="cpu" , dtype=torch.float32 , seed=0 )-> List[str]:
        generator = torch.Generator(device=generator_device ).manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 6_4, 6_4) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def snake_case_( self )-> str:
        pipe = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.4_9_4_9_3, 0.4_7_8_9_6, 0.4_0_7_9_8, 0.5_4_2_1_4, 0.5_3_2_1_2, 0.4_8_2_0_2, 0.4_7_6_5_6, 0.4_6_3_2_9, 0.4_8_5_0_6] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 719
|
'''simple docstring'''
import argparse
import json
from tqdm import tqdm
def main() -> None:
    """simple docstring"""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--src_path''' , type=str , default='''biencoder-nq-dev.json''' , help='''Path to raw DPR training data''' , )
    parser.add_argument(
        '''--evaluation_set''' , type=str , help='''where to store parsed evaluation_set file''' , )
    parser.add_argument(
        '''--gold_data_path''' , type=str , help='''where to store parsed gold_data_path file''' , )
    args = parser.parse_args()
    with open(args.src_path , '''r''' ) as src_file, open(args.evaluation_set , '''w''' ) as eval_file, open(
        args.gold_data_path , '''w''' ) as gold_file:
        dpr_records = json.load(src_file )
        for dpr_record in tqdm(dpr_records ):
            question = dpr_record['''question''']
            contexts = [context['''title'''] for context in dpr_record['''positive_ctxs''']]
            eval_file.write(question + '''\n''' )
            gold_file.write('''\t'''.join(contexts ) + '''\n''' )
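# Sketch of the expected shape of one record in `biencoder-nq-dev.json`
# (the field values below are illustrative; real records carry more fields):
# [
#   {
#     "question": "who sings does he love me with reba",
#     "positive_ctxs": [{"title": "Does He Love You", "text": "..."}]
#   }
# ]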
if __name__ == "__main__":
main()
| 318
| 0
|
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast ( BertTokenizerFast ):
    '''simple docstring'''
    slow_tokenizer_class = CustomTokenizer
    pass
| 59
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowercase : List[Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor ( BaseImageProcessor ):
    """simple docstring"""
    model_input_names = ['pixel_values']
    def __init__( self , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BICUBIC , do_center_crop : bool = True , crop_size : Dict[str, int] = None , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 2_5_5 , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , do_convert_rgb : bool = True , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 2_2_4}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name="crop_size" )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize( self , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
        output_size = get_resize_output_image_size(image , size=size["shortest_edge"] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )
    def rescale( self , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        """simple docstring"""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        """simple docstring"""
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_center_crop : bool = None , crop_size : int = None , do_rescale : bool = None , rescale_factor : float = None , do_normalize : bool = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , do_convert_rgb : bool = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Optional[ChannelDimension] = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , param_name="size" , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" , default_to_square=True )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 649
| 0
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class MidiProcessor ( metaclass=DummyObject ):
    # dummy placeholder raised when transformers/torch/note_seq are unavailable
    _backends = ["transformers", "torch", "note_seq"]
    def __init__( self , *args , **kwargs ) -> Any:
        requires_backends(self , ["transformers", "torch", "note_seq"] )
    @classmethod
    def from_config( cls , *args , **kwargs ) -> List[Any]:
        requires_backends(cls , ["transformers", "torch", "note_seq"] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ) -> Dict:
        requires_backends(cls , ["transformers", "torch", "note_seq"] )
| 489
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 20_48,
}
class XGLMTokenizer ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , sp_model_kwargs = None , **kwargs , ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens" , [] )
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model )
        madeup_words = {f'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
        self.fairseq_tokens_to_ids.update(madeup_words )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__( self ) -> List[str]:
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , d ) -> Optional[int]:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None ) -> List[int]:
        if token_ids_b is None:
            return [self.sep_token_id] + token_ids_a
        sep = [self.sep_token_id]
        return sep + token_ids_a + sep + sep + token_ids_b
    def get_special_tokens_mask( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a ))
        return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b ))
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_b is None:
            return len(sep + token_ids_a ) * [0]
        return len(sep + token_ids_a + sep + sep + token_ids_b ) * [0]
    @property
    def vocab_size( self ) -> int:
        return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
    def get_vocab( self ) -> Any:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ) -> List[str]:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index ) -> List[Any]:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens ) -> Union[str, Any]:
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , " " ).strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 489
| 1
|
def optimal_merge_pattern( files ) -> float:
    optimal_merge_cost = 0
    while len(files ) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2 ):
            min_index = files.index(min(files ) )
            temp += files[min_index]
            files.pop(min_index )
        files.append(temp )
        optimal_merge_cost += temp
    return optimal_merge_cost
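# Worked example (hypothetical input): for file sizes [2, 3, 4], the two
# smallest files (2 and 3) merge first at cost 5, then 5 and 4 merge at cost 9,
# so the optimal total merge cost is 5 + 9 = 14.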
if __name__ == "__main__":
import doctest
doctest.testmod()
| 100
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMvaProcessor ( ProcessorMixin ):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'LayoutLMv3ImageProcessor'
    tokenizer_class = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ) -> Tuple:
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )
    def __call__( self , images , text : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , text_pair : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , boxes : Union[List[List[int]], List[List[List[int]]]] = None , word_labels : Optional[Union[List[int], List[List[int]]]] = None , add_special_tokens : bool = True , padding : Union[bool, str, PaddingStrategy] = False , truncation : Union[bool, str, TruncationStrategy] = None , max_length : Optional[int] = None , stride : int = 0 , pad_to_multiple_of : Optional[int] = None , return_token_type_ids : Optional[bool] = None , return_attention_mask : Optional[bool] = None , return_overflowing_tokens : bool = False , return_special_tokens_mask : bool = False , return_offsets_mapping : bool = False , return_length : bool = False , verbose : bool = True , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
        """simple docstring"""
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                'You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.' )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['words']
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features['words'] , text_pair=text_pair , boxes=boxes if boxes is not None else features['boxes'] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel values
        images = features.pop('pixel_values' )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs['overflow_to_sample_mapping'] )
        encoded_inputs['pixel_values'] = images
        return encoded_inputs
    def get_overflowing_images( self , images , overflow_to_sample_mapping ) -> Union[str, Any]:
        """simple docstring"""
        # in case of overflow, map each `input_ids` sample back to its source image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
                f" {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}" )
        return images_with_overflow
    def batch_decode( self , *args , **kwargs ) -> Tuple:
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ) -> Any:
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ) -> List[str]:
        """simple docstring"""
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]
    @property
    def feature_extractor_class( self ) -> List[Any]:
        """simple docstring"""
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ) -> Optional[Any]:
        """simple docstring"""
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
        return self.image_processor
| 417
| 0
|
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string( stringa : str , stringb : str ):
    '''simple docstring'''
    lista = list(stringa )
    listb = list(stringb )
    count = 0
    for i in range(len(lista ) ):
        if lista[i] != listb[i]:
            count += 1
            lista[i] = '''_'''
    if count > 1:
        return False
    else:
        return "".join(lista )
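# Example: "0110" and "0100" differ in exactly one position, so they merge
# into "01_0"; "0110" and "1001" differ in all four positions, so
# compare_string returns False.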
def check( binary : list[str] ):
    '''simple docstring'''
    pi = []
    while True:
        checka = ['''$'''] * len(binary )
        temp = []
        for i in range(len(binary ) ):
            for j in range(i + 1 , len(binary ) ):
                k = compare_string(binary[i] , binary[j] )
                if k is not False:
                    # the pair merges, so neither term is prime on its own
                    checka[i] = '''*'''
                    checka[j] = '''*'''
                    temp.append(k )
        for i in range(len(binary ) ):
            if checka[i] == "$":
                pi.append(binary[i] )
        if len(temp ) == 0:
            return pi
        binary = list(set(temp ) )
def decimal_to_binary( no_of_variable : int , minterms : Sequence[float] ):
    '''simple docstring'''
    temp = []
    for minterm in minterms:
        string = ''''''
        for _ in range(no_of_variable ):
            string = str(minterm % 2 ) + string
            minterm //= 2
        temp.append(string )
    return temp
def is_for_table( stringa : str , stringb : str , count : int ):
    '''simple docstring'''
    lista = list(stringa )
    listb = list(stringb )
    count_n = 0
    for i in range(len(lista ) ):
        if lista[i] != listb[i]:
            count_n += 1
    return count_n == count
def selection( chart : list[list[int]] , prime_implicants : list[str] ):
    '''simple docstring'''
    temp = []
    select = [0] * len(chart )
    for i in range(len(chart[0] ) ):
        count = 0
        rem = -1
        for j in range(len(chart ) ):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select ) ):
        if select[i] == 1:
            for j in range(len(chart[0] ) ):
                if chart[i][j] == 1:
                    for k in range(len(chart ) ):
                        chart[k][j] = 0
            temp.append(prime_implicants[i] )
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart ) ):
            count_n = chart[i].count(1 )
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem] )
        for i in range(len(chart[0] ) ):
            if chart[rem][i] == 1:
                for j in range(len(chart ) ):
                    chart[j][i] = 0
def prime_implicant_chart( prime_implicants : list[str] , binary : list[str] ):
    '''simple docstring'''
    chart = [[0 for x in range(len(binary ) )] for x in range(len(prime_implicants ) )]
    for i in range(len(prime_implicants ) ):
        count = prime_implicants[i].count('''_''' )
        for j in range(len(binary ) ):
            if is_for_table(prime_implicants[i] , binary[j] , count ):
                chart[i][j] = 1
    return chart
def main():
    '''simple docstring'''
    no_of_variable = int(input('''Enter the no. of variables\n''' ) )
    minterms = [
        float(x )
        for x in input(
            '''Enter the decimal representation of Minterms \'Spaces Separated\'\n''' ).split()
    ]
    binary = decimal_to_binary(no_of_variable , minterms )
    prime_implicants = check(binary )
    print('''Prime Implicants are:''' )
    print(prime_implicants )
    chart = prime_implicant_chart(prime_implicants , binary )
    essential_prime_implicants = selection(chart , prime_implicants )
    print('''Essential Prime Implicants are:''' )
    print(essential_prime_implicants )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 241
|
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests ( TestCasePlus ):
    '''simple docstring'''
@require_torch
    def test_offline_mode( self ):
        '''simple docstring'''
        load = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
        run = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
        mock = '''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
'''
        # Force fetching the files so that we can use the cache
        mname = '''hf-internal-testing/tiny-random-bert'''
        BertConfig.from_pretrained(mname )
        BertModel.from_pretrained(mname )
        BertTokenizer.from_pretrained(mname )
        pipeline(task='''fill-mask''' , model=mname )
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['''TRANSFORMERS_OFFLINE'''] = '''1'''
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
    def test_offline_mode_no_internet( self ):
        '''simple docstring'''
        load = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
        run = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
        mock = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
        # Force fetching the files so that we can use the cache
        mname = '''hf-internal-testing/tiny-random-bert'''
        BertConfig.from_pretrained(mname )
        BertModel.from_pretrained(mname )
        BertTokenizer.from_pretrained(mname )
        pipeline(task='''fill-mask''' , model=mname )
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
    def test_offline_mode_sharded_checkpoint( self ):
        '''simple docstring'''
        load = '''
from transformers import BertConfig, BertModel, BertTokenizer
'''
        run = '''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''
        mock = '''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('''success''' , result.stdout.decode() )
        # next emulate no network
        cmd = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['''TRANSFORMERS_OFFLINE'''] = '''1'''
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
    def test_offline_mode_pipeline_exception( self ):
        '''simple docstring'''
        load = '''
from transformers import pipeline
'''
        run = '''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''
        mock = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
        env = self.get_env()
        env['''TRANSFORMERS_OFFLINE'''] = '''1'''
        cmd = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 1 , result.stderr )
        self.assertIn(
            '''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , )
@require_torch
    def test_offline_model_dynamic_model( self ):
        '''simple docstring'''
        load = '''
from transformers import AutoModel
'''
        run = '''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('''success''' , result.stdout.decode() )
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['''TRANSFORMERS_OFFLINE'''] = '''1'''
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('''success''' , result.stdout.decode() )
| 241
| 1
|
from math import isclose, sqrt
def next_point( point_x , point_y , incoming_gradient ):
    '''simple docstring'''
    # gradient of the normal to the ellipse at the reflection point
    normal_gradient = point_y / 4 / point_x
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus , point_x ) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient
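# The rotation above relies on the double-angle identities: for a normal of
# gradient m, sin(2*theta) = 2m / (1 + m^2) and cos(2*theta) = (1 - m^2) / (1 + m^2),
# which are the `sa` and `ca` terms used to reflect the incoming gradient.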
def solution( first_x_coord = 1.4 , first_y_coord = -9.6 ):
    '''simple docstring'''
    num_reflections = 0
    point_x = first_x_coord
    point_y = first_y_coord
    gradient = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x , point_y , gradient )
        num_reflections += 1
    return num_reflections
if __name__ == "__main__":
print(F'''{solution() = }''')
| 80
|
'''simple docstring'''
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig ( SageMakerConfig ):
    """simple docstring"""
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fpaa = True
    ecz_instance_type = '''ml.p3.2xlarge'''
    iam_role_name = '''accelerate_sagemaker_execution_role'''
    profile = '''hf-sm'''
    region = '''us-east-1'''
    num_machines = 1
    base_job_name = '''accelerate-sagemaker-1'''
    pytorch_version = '''1.6'''
    transformers_version = '''4.4'''
    training_script = '''train.py'''
    success_training_script_args = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''False''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
    fail_training_script_args = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''--do_test''',
'''False''',
'''--do_predict''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
    def test_args_convert( self ) -> Dict:
        '''simple docstring'''
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
        assert isinstance(converted_args['''model_name_or_path'''] , str )
        assert isinstance(converted_args['''do_train'''] , bool )
        assert isinstance(converted_args['''epochs'''] , int )
        assert isinstance(converted_args['''learning_rate'''] , float )
        assert isinstance(converted_args['''max_steps'''] , float )
        with pytest.raises(ValueError ):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
| 94
| 0
|
from __future__ import annotations
def all_unique( collection ) -> bool:
    return len(set(collection ) ) == len(collection )
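# Example: all_unique([1, 2, 3]) is True because the set {1, 2, 3} keeps all
# three elements, while all_unique([1, 2, 2]) is False because the set
# collapses the duplicate.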
if __name__ == "__main__":
import doctest
doctest.testmod()
| 262
|
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ['''text''', '''image''', '''audio''']
def create_inputs( input_types ) -> List[str]:
    inputs = []
for input_type in input_types:
if input_type == "text":
inputs.append('''Text input''' )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' ).resize((5_1_2, 5_1_2) ) )
elif input_type == "audio":
inputs.append(torch.ones(3_0_0_0 ) )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
inputs.append(create_inputs(SCREAMING_SNAKE_CASE_ ) )
else:
raise ValueError(F"Invalid type requested: {input_type}" )
return inputs
def output_types( outputs ) -> Optional[int]:
    output_types = []
for output in outputs:
if isinstance(SCREAMING_SNAKE_CASE_ , (str, AgentText) ):
output_types.append('''text''' )
elif isinstance(SCREAMING_SNAKE_CASE_ , (Image.Image, AgentImage) ):
output_types.append('''image''' )
elif isinstance(SCREAMING_SNAKE_CASE_ , (torch.Tensor, AgentAudio) ):
output_types.append('''audio''' )
else:
raise ValueError(F"Invalid output: {output}" )
return output_types
@is_tool_test
class ToolTesterMixin :
    def test_inputs_outputs( self ) -> Tuple:
        self.assertTrue(hasattr(self.tool , '''inputs''' ) )
        self.assertTrue(hasattr(self.tool , '''outputs''' ) )
        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input , list ):
                for __input in _input:
                    self.assertTrue(__input in authorized_types )
            else:
                self.assertTrue(_input in authorized_types )
        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types )
    def test_call( self ) -> Any:
        inputs = create_inputs(self.tool.inputs )
        outputs = self.tool(*inputs )
        # There is a single output
        if len(self.tool.outputs ) == 1:
            outputs = [outputs]
        self.assertListEqual(output_types(outputs ) , self.tool.outputs )
    def test_common_attributes( self ) -> Any:
self.assertTrue(hasattr(self.tool , '''description''' ) )
self.assertTrue(hasattr(self.tool , '''default_checkpoint''' ) )
self.assertTrue(self.tool.description.startswith('''This is a tool that''' ) )
    def test_agent_types_outputs( self ) -> Any:
        inputs = create_inputs(self.tool.inputs )
        outputs = self.tool(*inputs )
        if not isinstance(outputs , list ):
            outputs = [outputs]
        self.assertEqual(len(outputs ) , len(self.tool.outputs ) )
        for output, output_type in zip(outputs , self.tool.outputs ):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output , agent_type ) )
    def test_agent_types_inputs( self ) -> List[str]:
        inputs = create_inputs(self.tool.inputs )
        _inputs = []
        for _input, input_type in zip(inputs , self.tool.inputs ):
            if isinstance(input_type , list ):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
        # Should not raise an error
        outputs = self.tool(*_inputs )
        if not isinstance(outputs , list ):
            outputs = [outputs]
        self.assertEqual(len(outputs ) , len(self.tool.outputs ) )
| 262
| 1
|
from collections import defaultdict
class AssignmentUsingBitmask :
    '''simple docstring'''
    def __init__( self , task_performed , total ):
        """simple docstring"""
        self.total_tasks = total  # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1 )] for j in range(2 ** len(task_performed ) )
        ]
        self.task = defaultdict(list )  # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed )) - 1
    def count_ways_until( self , mask , task_no ):
"""simple docstring"""
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
# Number of ways when we don't this task in the arrangement
        total_ways_util = self.count_ways_until(mask , task_no + 1 )
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
# save the value.
        self.dp[mask][task_no] = total_ways_util
return self.dp[mask][task_no]
    def count_no_of_ways( self , task_performed ):
        """simple docstring"""
        for i in range(len(task_performed ) ):
            for j in task_performed[i]:
                self.task[j].append(i )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1 )
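# Complexity note: the memoised recursion touches each (mask, task) pair at
# most once, so time and memory are both O(N * 2^M) for N tasks and M persons.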
if __name__ == "__main__":
lowercase__ : int = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
lowercase__ : Any = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
| 376
|
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes : dict = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class DiffieHellman :
    '''simple docstring'''
    def __init__( self , group : int = 14 ):
        """simple docstring"""
        if group not in primes:
            raise ValueError("Unsupported Group" )
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]
        self.__private_key = int(hexlify(urandom(32 ) ) , base=16 )
    def get_private_key( self ):
        """simple docstring"""
        return hex(self.__private_key )[2:]
    def generate_public_key( self ):
        """simple docstring"""
        public_key = pow(self.generator , self.__private_key , self.prime )
        return hex(public_key )[2:]
    def is_valid_public_key( self , key : int ):
        """simple docstring"""
        return (
            2 <= key <= self.prime - 2
            and pow(key , (self.prime - 1) // 2 , self.prime ) == 1
        )
    def generate_shared_key( self , other_key_str : str ):
        """simple docstring"""
        other_key = int(other_key_str , base=16 )
        if not self.is_valid_public_key(other_key ):
            raise ValueError("Invalid public key" )
        shared_key = pow(other_key , self.__private_key , self.prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
    @staticmethod
    def is_valid_public_key_static( remote_public_key_str : int , prime : int ):
        """simple docstring"""
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str , (prime - 1) // 2 , prime ) == 1
        )
    @staticmethod
    def generate_shared_key_static( local_private_key_str : str , remote_public_key_str : str , group : int = 14 ):
        """simple docstring"""
        local_private_key = int(local_private_key_str , base=16 )
        remote_public_key = int(remote_public_key_str , base=16 )
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key , prime ):
            raise ValueError("Invalid public key" )
        shared_key = pow(remote_public_key , local_private_key , prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
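# Minimal usage sketch (the names `alice` and `bob` are illustrative):
#
#   alice, bob = DiffieHellman(group=14), DiffieHellman(group=14)
#   shared_a = alice.generate_shared_key(bob.generate_public_key())
#   shared_b = bob.generate_shared_key(alice.generate_public_key())
#   assert shared_a == shared_b  # both sides derive the same SHA-256 digest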
| 376
| 1
|
import os
from distutils.util import strtobool
def get_int_from_env( env_keys , default ):
    """simple docstring"""
    for e in env_keys:
        val = int(os.environ.get(e , -1 ) )
        if val >= 0:
            return val
    return default
def parse_flag_from_env( key , default=False ):
    """simple docstring"""
    value = os.environ.get(key , str(default ) )
    return strtobool(value ) == 1  # As its name indicates `strtobool` actually returns an int...
def parse_choice_from_env( key , default="no" ):
    """simple docstring"""
    value = os.environ.get(key , str(default ) )
    return value
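# Usage sketch (MY_TIMEOUT is a hypothetical variable): with MY_TIMEOUT=30 set
# in the environment, get_int_from_env(["MY_TIMEOUT"], 10) returns 30; if the
# variable is unset, the default 10 is returned instead.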
| 405
|
class OverFlowError ( Exception ):
    pass
class UnderFlowError ( Exception ):
    pass
class FixedPriorityQueue :
    def __init__( self ):
        self.queues = [
            [],
            [],
            [],
        ]
def _UpperCAmelCase ( self : Tuple ,__SCREAMING_SNAKE_CASE : int ,__SCREAMING_SNAKE_CASE : int ):
try:
if len(self.queues[priority] ) >= 1_0_0:
raise OverflowError("Maximum queue size is 100" )
self.queues[priority].append(__SCREAMING_SNAKE_CASE )
except IndexError:
raise ValueError("Valid priorities are 0, 1, and 2" )
def _UpperCAmelCase ( self : List[str] ):
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError("All queues are empty" )
def __str__( self : Optional[Any] ):
return "\n".join(f'''Priority {i}: {q}''' for i, q in enumerate(self.queues ) )
class __magic_name__ :
def __init__( self : Any ):
UpperCAmelCase = []
def _UpperCAmelCase ( self : List[str] ,__SCREAMING_SNAKE_CASE : int ):
if len(self.queue ) == 1_0_0:
raise OverFlowError("Maximum queue size is 100" )
self.queue.append(__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : Optional[Any] ):
if not self.queue:
raise UnderFlowError("The queue is empty" )
else:
UpperCAmelCase = min(self.queue )
self.queue.remove(__SCREAMING_SNAKE_CASE )
return data
def __str__( self : Optional[Any] ):
return str(self.queue )
def fixed_priority_queue():
    """simple docstring"""
    fpq = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 1_00 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 1_28 )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue():
    """simple docstring"""
    epq = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(1_00 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(1_28 )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 405
| 1
|
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
# convert decimal images to bit representations and back
def decimal_to_bits(x, bits=BITS):
    """expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1"""
    device = x.device
    x = (x * 255).int().clamp(0, 255)
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, '''d -> d 1 1''')
    x = rearrange(x, '''b c h w -> b c 1 h w''')
    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, '''b c d h w -> b (c d) h w''')
    bits = bits * 2 - 1
    return bits
def bits_to_decimal(x, bits=BITS):
    """expects bits from -1 to 1, outputs image tensor from 0 to 1"""
    device = x.device
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)
    mask = rearrange(mask, '''d -> d 1 1''')
    x = rearrange(x, '''b (c d) h w -> b c d h w''', d=8)
    dec = reduce(x * mask, '''b c d h w -> b c h w''', '''sum''')
    return (dec / 255).clamp(0.0, 1.0)
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    """Predict the sample at the previous timestep by reversing the SDE (DDIM variant)."""
    if self.num_inference_steps is None:
        raise ValueError(
            '''Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler''')
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"
    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t
    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)
    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5
    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else '''cpu'''
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise
        prev_sample = prev_sample + variance
    if not return_dict:
        return (prev_sample,)
    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    """Predict the sample at the previous timestep by reversing the SDE (DDPM variant)."""
    t = timestep
    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None
    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev
    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"""Unsupported prediction_type {prediction_type}.""")
    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)
    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise
    pred_prev_sample = pred_prev_sample + variance
    if not return_dict:
        return (pred_prev_sample,)
    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        self.register_modules(unet=unet, scheduler=scheduler)
        # route `scheduler.step` through the bit-aware step functions above
        # (bound with __get__ so the scheduler instance is passed as `self`)
        self.scheduler.step = (
            ddim_bit_scheduler_step.__get__(self.scheduler)
            if isinstance(scheduler, DDIMScheduler)
            else ddpm_bit_scheduler_step.__get__(self.scheduler)
        )
    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width), generator=generator, )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample
        image = bits_to_decimal(latents)
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
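# Illustrative wiring sketch (the checkpoint path below is a placeholder
# assumption, not a real model id):
#   unet = UNet2DConditionModel.from_pretrained("path/to/bit-diffusion-unet")
#   pipe = BitDiffusion(unet=unet, scheduler=DDIMScheduler(), bit_scale=1.0)
#   image = pipe(height=256, width=256, num_inference_steps=50).images[0]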
| 14
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")
        processor = BlipProcessor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer
    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        # prepares a list of PIL images
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 323
| 0
|
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        is_training=True,
        use_pretrained_backbone=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values
    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size, num_channels=self.num_channels, out_features=self.out_features, out_indices=self.out_indices, stage_names=self.stage_names, use_pretrained_backbone=self.use_pretrained_backbone, backbone=self.backbone, )
    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape, (self.batch_size, model.channels[-1], 14, 14), )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = '''resnet18'''
        transformers_checkpoint = '''microsoft/resnet-18'''
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])
        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass
    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass
    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass
    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass
    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass
    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass
    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass
    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass
    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass
    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_when_same_tied_weights(self):
        pass
    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass
    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass
    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)
        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]
        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True)
        self.assertIsNotNone(hidden_states.grad)
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))
            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)
            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
| 200
|
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 128,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 50,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 10,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 10,
'exponential_decay_length_penalty': (5, 1.01),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id='''test-config''')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='''valid_org/test-config-org''')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='''test-dynamic-config''')
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        config.push_to_hub('''test-config''', use_auth_token=self._token)
        new_config = BertConfig.from_pretrained(f'{USER}/test-config')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id='''test-config''')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id='''test-config''', push_to_hub=True, use_auth_token=self._token)
        new_config = BertConfig.from_pretrained(f'{USER}/test-config')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        config.push_to_hub('''valid_org/test-config-org''', use_auth_token=self._token)
        new_config = BertConfig.from_pretrained('''valid_org/test-config-org''')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id='''valid_org/test-config-org''')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id='''valid_org/test-config-org''', push_to_hub=True, use_auth_token=self._token)
        new_config = BertConfig.from_pretrained('''valid_org/test-config-org''')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)
        config.push_to_hub('''test-dynamic-config''', use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {'''AutoConfig''': '''custom_configuration.CustomConfig'''})
        new_config = AutoConfig.from_pretrained(f'{USER}/test-dynamic-config', trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, '''CustomConfig''')
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + '''foo'''  # str
        c.update_from_string(
            f'n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}')
        self.assertEqual(n_embd, c.n_embd, '''mismatch for key: n_embd''')
        self.assertEqual(resid_pdrop, c.resid_pdrop, '''mismatch for key: resid_pdrop''')
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, '''mismatch for key: scale_attn_weights''')
        self.assertEqual(summary_type, c.summary_type, '''mismatch for key: summary_type''')
    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''])
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                '''The following keys are set with the default values in'''
                ''' `test_configuration_common.config_common_kwargs` pick another value for them:'''
                f' {", ".join(keys_with_defaults)}.')
    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''')
        config = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''', subfolder='''bert''')
        self.assertIsNotNone(config)
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''')
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('''requests.Session.request''', return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''')
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        _ = BertConfig.from_pretrained(
            '''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''')
    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained('''bert-base-cased''')
        configuration.configuration_files = ['''config.4.0.0.json''']
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, '''config.4.0.0.json'''), '''w'''))
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ['''config.42.0.0.json''']
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, '''config.4.0.0.json'''), os.path.join(tmp_dir, '''config.42.0.0.json'''))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)
    def test_repo_versioning_before(self):
        repo = '''hf-internal-testing/test-two-configs'''
        import transformers as new_transformers
        new_transformers.configuration_utils.__version__ = '''v4.0.0'''
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True)
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})
        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers
        old_transformers.configuration_utils.__version__ = '''v3.0.0'''
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
| 200
| 1
|
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Apply X gates to two qubits of a circuit and measure them."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(F"""Total count for various states are: {counts}""")
| 115
|
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)
    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")
    matches = re.match(R"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1_0_0_1
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_v1_config(model_name)
    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size}, size={"shortest_edge": config.image_size + 3_2}, )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits
    assert logits.shape == (1, 1_0_0_1)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''mobilenet_v1_1.0_224''',
type=str,
help='''Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.''',
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 452
| 0
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)
WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class WhisperConfig(PretrainedConfig):
    model_type = """whisper"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ) -> int:
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, suppress_tokens=suppress_tokens, begin_suppress_tokens=begin_suppress_tokens, **kwargs, )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ('''input_features''', {0: '''batch''', 1: '''feature_size''', 2: '''encoder_sequence'''}),
            ])
        if self.use_past:
            common_inputs['''decoder_input_ids'''] = {0: """batch"""}
        else:
            common_inputs['''decoder_input_ids'''] = {0: """batch""", 1: """decoder_sequence"""}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='''inputs''')
        return common_inputs
    def generate_dummy_inputs(
        self,
        preprocessor: Union["FeatureExtractionMixin", "PreTrainedTokenizerBase"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self, preprocessor=preprocessor.feature_extractor, batch_size=batch_size, framework=framework, sampling_rate=sampling_rate, time_duration=time_duration, frequency=frequency, )
        encoder_sequence_length = encoder_inputs["""input_features"""].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework)
        dummy_inputs['''input_features'''] = encoder_inputs.pop('''input_features''')
        dummy_inputs['''decoder_input_ids'''] = decoder_inputs.pop('''decoder_input_ids''')
        if "past_key_values" in decoder_inputs:
            dummy_inputs['''past_key_values'''] = decoder_inputs.pop('''past_key_values''')
        return dummy_inputs
    @property
    def atol_for_validation(self) -> float:
        return 1e-3
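# A minimal usage sketch (illustrative addition): the values checked below are
# the defaults from the signature above.
def _example_whisper_config() -> None:
    config = WhisperConfig()
    assert config.d_model == 256
    assert config.hidden_size == config.d_model  # resolved through `attribute_map`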
| 712
|
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    # get the sigmoid of any number z
    return 1 / (1 + np.exp(-z))
def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()
def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))
# here alpha is the learning rate, x is the feature matrix, y is the target matrix
def logistic_reg(alpha, x, y, max_iterations=70_000):
    theta = np.zeros(x.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f'''loss: {j} \t''')  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print('theta: ', theta)  # printing the theta i.e our weights vector
    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta))  # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors='black')
plt.legend()
plt.show()
| 186
| 0
|
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137
def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Calculate the great-circle distance (in meters) between two points on Earth."""
    # CONSTANTS per WGS84 https://en.wikipedia.org/wiki/World_Geodetic_System
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
import doctest
doctest.testmod()
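# A quick usage sketch (illustrative; the coordinates are assumptions): the
# distance from San Francisco to Yosemite Valley comes out to roughly 254 km.
def _example_haversine() -> float:
    return haversine_distance(37.774856, -122.424227, 37.864742, -119.537521)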
| 382
|
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
'''simple docstring'''
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if resistance < 0:
raise ValueError('''Resistance cannot be negative''' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
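# A small usage sketch (illustrative addition): exactly one of the three
# arguments must be zero, and the function solves for that quantity.
def _example_ohms_law() -> None:
    assert ohms_law(voltage=10, current=0, resistance=5) == {"current": 2.0}
    assert ohms_law(voltage=0, current=2, resistance=5) == {"voltage": 10.0}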
| 393
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    """simple docstring"""
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            '''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use MobileViTImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 721
|
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = "\\n@inproceedings{popovic-2015-chrf,\n title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",\n month = sep,\n year = \"2015\",\n address = \"Lisbon, Portugal\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W15-3049\",\n doi = \"10.18653/v1/W15-3049\",\n pages = \"392--395\",\n}\n@inproceedings{popovic-2017-chrf,\n title = \"chr{F}++: words helping character n-grams\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Second Conference on Machine Translation\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W17-4770\",\n doi = \"10.18653/v1/W17-4770\",\n pages = \"612--618\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_DESCRIPTION = "\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n"
_KWARGS_DESCRIPTION = "\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n 'score' (float): The chrF (chrF++) score,\n 'char_order' (int): The character n-gram order,\n 'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n 'beta' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ChrF(datasets.Metric):
    """simple docstring"""
    def _info(self):
        if version.parse(scb.__version__) < version.parse('''1.4.12'''):
            raise ImportWarning(
                '''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
                '''You can install it with `pip install "sacrebleu>=1.4.12"`.''')
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, homepage='''https://github.com/mjpost/sacreBLEU#chrf--chrf''', inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''', id='''sequence'''),
                    '''references''': datasets.Sequence(datasets.Value('''string''', id='''sequence'''), id='''references'''),
                }), codebase_urls=['''https://github.com/mjpost/sacreBLEU#chrf--chrf'''], reference_urls=[
                '''https://github.com/m-popovic/chrF''',
            ], )
    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError('''Sacrebleu requires the same number of references for each prediction''')
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
| 266
| 0
|
"""simple docstring"""
def longest_distance(graph):
    """Find the longest distance in a DAG via topological ordering (Kahn's algorithm)."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)
    print(max(long_dist))
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
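# For the adjacency list above this prints 5: the longest path is
# 0 -> 2 -> 5 -> 6 -> 7, and long_dist counts vertices starting from 1.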
| 516
|
"""simple docstring"""
def twos_complement(number: int) -> str:
    """Take a negative integer and return its two's complement representation as a binary string."""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
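# A worked example (illustrative addition): for -5, bin(-5)[3:] == "101" is 3
# bits long, and abs(-5) - (1 << 3) == -3 yields bits "11", which is padded
# and prefixed with the sign bit to give "1011".
def _example_twos_complement() -> None:
    assert twos_complement(-5) == "0b1011"
    assert twos_complement(0) == "0b0"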
| 516
| 1
|
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, """config.json""")) and os.path.isfile(
            os.path.join(dirpath, """config.json""")):
            os.remove(os.path.join(dirpath, """config.json"""))
        if os.path.exists(os.path.join(dirpath, """pytorch_model.bin""")) and os.path.isfile(
            os.path.join(dirpath, """pytorch_model.bin""")):
            os.remove(os.path.join(dirpath, """pytorch_model.bin"""))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution, -sum(p * log(p))."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def print_2d_tensor(tensor):
    """Print a 2D tensor as a tab-separated table, one row per layer."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and head importance scores (Michel et al., arXiv:1905.10650)."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)

    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Mask heads (set some to zero) until the LM score drops below the masking threshold."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune heads (actually remove the masked weights) and compare score and timing."""
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("""Waiting for debugger attach""" )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("""device: {} n_gpu: {}, distributed: {}""".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
# Distributed and parallel training
model.to(args.device )
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)
# Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)
# Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
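# Hedged CLI sketch (paths are illustrative, flags come from the parser above):
# with the cleaned-up names, a typical invocation looks roughly like
#   python run_prune_gpt.py --model_name_or_path gpt2 --data_dir tokens.txt \
#       --output_dir ./pruned --try_masking --masking_threshold 0.9
# where --data_dir points at a token-id file loadable via np.loadtxt, as
# main() expects.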
| 704
|
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
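# Hedged usage sketch (paths are illustrative; `x` is assumed to be a batch of
# images shaped (B, 3, H, W), scaled the way the checkpoint expects):
# model = load_vqgan(torch.device("cpu"),
#                    conf_path="./model_checkpoints/vqgan_only.yaml",
#                    ckpt_path="./model_checkpoints/vqgan_only.pt")
# x_rec = reconstruct_with_vqgan(x, model)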
| 149
| 0
|
"""A keyboard-driven selection menu for terminal (and Colab) prompts."""
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False

try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    """A CLI menu to select a choice from a list of choices using the keyboard."""

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        "Prints the choice at the given index"
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        "Should not be directly called, used to move a direction of either up or down"
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        "Start the menu and return the result"
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
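# Hedged usage sketch (the choices are illustrative; this starts an
# interactive keyboard-driven prompt, so it is left commented out):
# choice = BulletMenu("Pick a mixed-precision mode:", ["no", "fp16", "bf16"]).run()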
| 44
|
"""Project Euler problem 25: https://projecteuler.net/problem=25
Find the index of the first term in the Fibonacci sequence to contain n digits."""


def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])

        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number with n digits."""
    return fibonacci_digits_index(n)
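# Hedged sanity checks (illustrative values, not from the original file):
# F(12) = 144 is the first three-digit Fibonacci number.
assert fibonacci(12) == 144
assert fibonacci_digits_index(3) == 12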
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 94
| 0
|
"""simple docstring"""
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d /= 2
        exp += 1
    # n - 1 = d * (2**exp)

    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
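# Hedged sanity checks (illustrative; the test is probabilistic, but these
# outcomes are overwhelmingly likely at the default precision):
assert is_prime_big(2) and is_prime_big(97)
assert not is_prime_big(91)  # 91 = 7 * 13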
if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
print("""Here's the list of primes:""")
print(""", """.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 518
|
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
}
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""t5-small""": 5_12,
"""t5-base""": 5_12,
"""t5-large""": 5_12,
"""t5-3b""": 5_12,
"""t5-11b""": 5_12,
}
SPIECE_UNDERLINE = "▁"
class T5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        legacy=True,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )

        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self._extra_ids = extra_ids

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids):
        """Do not add eos again if the user already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text, **kwargs):
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab; sentinels live at the top of the vocab."""
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
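# Hedged usage sketch (model name illustrative; requires downloading weights,
# so it is left commented out): sentinel tokens map to the top of the vocab.
# tok = T5Tokenizer.from_pretrained("t5-small")
# assert tok.convert_tokens_to_ids("<extra_id_0>") == tok.vocab_size - 1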
| 518
| 1
|
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Loads and validates the dataset metadata from its dataset card (README.md)."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Loads and validates the dataset metadata from a YAML string, rejecting duplicate keys."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
'image-classification': [],
'translation': [],
'image-segmentation': [],
'fill-mask': [],
'automatic-speech-recognition': [],
'token-classification': [],
'sentence-similarity': [],
'audio-classification': [],
'question-answering': [],
'summarization': [],
'zero-shot-classification': [],
'table-to-text': [],
'feature-extraction': [],
'other': [],
'multiple-choice': [],
'text-classification': [],
'text-to-image': [],
'text2text-generation': [],
'zero-shot-image-classification': [],
'tabular-classification': [],
'tabular-regression': [],
'image-to-image': [],
'tabular-to-text': [],
'unconditional-image-generation': [],
'text-retrieval': [],
'text-to-speech': [],
'object-detection': [],
'audio-to-audio': [],
'text-generation': [],
'conversational': [],
'table-question-answering': [],
'visual-question-answering': [],
'image-to-text': [],
'reinforcement-learning': [],
'voice-activity-detection': [],
'time-series-forecasting': [],
'document-question-answering': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
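# Hedged usage sketch (the path is illustrative):
#   python metadata.py path/to/README.md
# This parses the leading ----delimited YAML block (rejecting duplicate keys
# via _NoDuplicateSafeLoader), prints the metadata, and writes the normalized
# block back into the README.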
| 6
|
"""Manim scene: first stage of a model-loading animation (CPU/GPU/model memory blocks)."""
from manim import *
class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.play(
            Create(cpu_left_col, run_time=1), Create(cpu_right_col, run_time=1), Create(gpu_rect, run_time=1),
        )
        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.", font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model", font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))

        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)
            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))
        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
| 5
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 710
|
def matching_min_vertex_cover(graph: dict) -> set:
    """Approximate a minimum vertex cover using a maximal matching."""
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)

    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return a set of couples that represents all of the edges."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
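# Hedged sanity check (illustrative graph, not from the original file): for a
# single edge, the matching-based cover picks both endpoints.
assert matching_min_vertex_cover({0: [1], 1: [0]}) == {0, 1}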
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 138
| 0
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 0
|
"""simple docstring"""
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
return abs(lowerCamelCase__ ) if a == 0 else greatest_common_divisor(b % a , lowerCamelCase__ )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
while y: # --> when y=0 then loop will terminate and return x as final GCD.
lowerCAmelCase__ , lowerCAmelCase__ = y, x % y
return abs(lowerCamelCase__ )
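# Hedged sanity checks (illustrative values, not from the original file):
assert greatest_common_divisor(24, 40) == 8
assert gcd_by_iterative(24, 40) == 8
assert greatest_common_divisor(0, 7) == 7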
def main():
    """Call the greatest common divisor functions on user input."""
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(f"greatest_common_divisor({num_1}, {num_2}) = {greatest_common_divisor(num_1, num_2)}")
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")
if __name__ == "__main__":
main()
| 644
| 0
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to pre-train."""

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )


@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )


def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,handlers=[logging.StreamHandler(sys.stdout )] ,)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")
# adapt config
config.update(
{
"""mask_ratio""": model_args.mask_ratio,
"""norm_pix_loss""": model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
SCREAMING_SNAKE_CASE__ : List[Any] = ViTImageProcessor.from_pretrained(model_args.image_processor_name ,**_snake_case )
elif model_args.model_name_or_path:
SCREAMING_SNAKE_CASE__ : Tuple = ViTImageProcessor.from_pretrained(model_args.model_name_or_path ,**_snake_case )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
SCREAMING_SNAKE_CASE__ : Optional[int] = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path ,from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) ,config=_snake_case ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
else:
logger.info("""Training new model from scratch""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ViTMAEForPreTraining(_snake_case )
if training_args.do_train:
SCREAMING_SNAKE_CASE__ : Optional[Any] = ds["""train"""].column_names
else:
SCREAMING_SNAKE_CASE__ : List[Any] = ds["""validation"""].column_names
if data_args.image_column_name is not None:
SCREAMING_SNAKE_CASE__ : List[Any] = data_args.image_column_name
elif "image" in column_names:
SCREAMING_SNAKE_CASE__ : Tuple = """image"""
elif "img" in column_names:
SCREAMING_SNAKE_CASE__ : Dict = """img"""
else:
SCREAMING_SNAKE_CASE__ : List[str] = column_names[0]
    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )
    def preprocess_images(examples):
        """Preprocess a batch of images by applying the transforms defined above."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)
# Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
SCREAMING_SNAKE_CASE__ : Tuple = Trainer(
model=_snake_case ,args=_snake_case ,train_dataset=ds["""train"""] if training_args.do_train else None ,eval_dataset=ds["""validation"""] if training_args.do_eval else None ,tokenizer=_snake_case ,data_collator=_snake_case ,)
# Training
if training_args.do_train:
SCREAMING_SNAKE_CASE__ : Any = None
if training_args.resume_from_checkpoint is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
SCREAMING_SNAKE_CASE__ : Dict = last_checkpoint
SCREAMING_SNAKE_CASE__ : List[str] = trainer.train(resume_from_checkpoint=_snake_case )
trainer.save_model()
trainer.log_metrics("""train""" ,train_result.metrics )
trainer.save_metrics("""train""" ,train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
SCREAMING_SNAKE_CASE__ : Optional[Any] = trainer.evaluate()
trainer.log_metrics("""eval""" ,_snake_case )
trainer.save_metrics("""eval""" ,_snake_case )
# Write model card and (optionally) push to hub
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
"""tasks""": """masked-auto-encoding""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""masked-auto-encoding"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_snake_case )
else:
trainer.create_model_card(**_snake_case )
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
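

# A minimal sketch (not part of the original script) of the linear learning-rate
# scaling applied above: the base rate is scaled by the effective batch size over a
# reference batch of 256.  All names and values here are illustrative.
def _absolute_learning_rate_sketch(base_lr: float, per_device_batch: int, grad_accum: int, world_size: int) -> float:
    total_train_batch_size = per_device_batch * grad_accum * world_size  # effective batch size
    return base_lr * total_train_batch_size / 256


# e.g. _absolute_learning_rate_sketch(1.5e-4, 64, 2, 2) == 1.5e-4, since 64 * 2 * 2 == 256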
| 707
|
"""simple docstring"""
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    'bertabs-finetuned-cnndm': 'https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json',
}


class BertAbsConfig(PretrainedConfig):
    model_type = 'bertabs'

    def __init__(
        self,
        vocab_size=30_522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2_048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 545
| 0
|
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        '^': 3,
        '*': 2,
        '/': 2,
        '%': 2,
        '+': 1,
        '-': 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        'Symbol'.center(8), 'Stack'.center(print_width), 'Postfix'.center(print_width), sep=' | ',
    )
    print('-' * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8), (''.join(stack)).ljust(print_width), (''.join(post_fix)).ljust(print_width), sep=' | ',
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            ' '.center(8), (''.join(stack)).ljust(print_width), (''.join(post_fix)).ljust(print_width), sep=' | ',
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ')'  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = '('  # change ")" to "("
    return (infix_2_postfix(''.join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input('\nEnter an Infix Equation = ')  # Input an Infix equation
    Infix = ''.join(Infix.split())  # Remove spaces from the input
    print('\n\t', Infix, '(Infix) -> ', infix_2_prefix(Infix), '(Prefix)')
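    # Worked example (illustrative): reversing "a+b*c" gives "c*b+a", whose postfix
    # form is "cb*a+"; reversing that yields the prefix "+a*bc".  (The call also
    # prints the conversion trace table.)
    assert infix_2_prefix("a+b*c") == "+a*bc"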
| 97
|
def odd_even_transposition(arr: list) -> list:
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(1_0, 0, -1))
    print(f'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
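    # The alternating even/odd passes above are what make the algorithm
    # parallelizable; at most n passes are needed.  Illustrative check:
    assert odd_even_transposition([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]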
| 318
| 0
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
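
# What the _LazyModule indirection above buys (illustrative, not part of the original
# file): importing the package is cheap because the torch-backed submodule is only
# loaded on first attribute access.
#
#   from transformers.models.autoformer import AutoformerConfig  # no torch import yet
#   config = AutoformerConfig()
#   # AutoformerModel is imported (pulling in torch) only when it is first requested.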
| 641
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
    'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}


class FalconConfig(PretrainedConfig):
    model_type = 'falcon'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(
        self,
        vocab_size=65_024,
        hidden_size=4_544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop('n_embed', None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
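

# Illustrative check of the defaults above (not part of the original module): 71 heads
# over a 4544-dim hidden state give a head_dim of 64, and with `alibi=False` the config
# reports rotary position embeddings.
if __name__ == "__main__":
    config = FalconConfig()
    assert config.head_dim == 64
    assert config.rotary is True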
| 641
| 1
|
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
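    # Illustrative expected output for the sample data above:
    # -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10
    merged = merge_lists(SSL(test_data_odd), SSL(test_data_even))
    assert len(merged) == len(test_data_odd) + len(test_data_even)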
| 101
|
alphabet_size = 2_5_6
# Modulus to hash a string
modulus = 1_0_0_0_0_0_3


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using a rolling hash."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text_match = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_no_match = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_match) and not rabin_karp(pattern, text_no_match)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
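

# A standalone sketch (illustrative, not part of the original module) of the single
# rolling-hash step performed in the main loop: drop the outgoing character's
# top-weight contribution, shift by the alphabet size, then add the incoming character.
def _roll_hash_sketch(old_hash: int, out_char: str, in_char: str, modulus_power: int) -> int:
    return ((old_hash - ord(out_char) * modulus_power) * alphabet_size + ord(in_char)) % modulus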
if __name__ == "__main__":
test_rabin_karp()
| 551
| 0
|
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort `sequence[start:end + 1]` in place (deliberately inefficient multiply-and-surrender sort)."""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
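    # Illustrative in-place usage (not in the original module):
    sample = [9, 3, 7, 1, 8]
    slowsort(sample)
    assert sample == [1, 3, 7, 8, 9]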
| 714
|
'''simple docstring'''
def base16_encode(data: bytes) -> str:
    """Encode `data` as an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])
def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 (hex) string back to bytes."""
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:
Data does not have an even number of hex digits."""
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            """Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters."""
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
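    # Worked example (illustrative): "HELLO" is the hex byte sequence 48 45 4C 4C 4F.
    assert base16_encode(b"HELLO") == "48454C4C4F"
    assert base16_decode("48454C4C4F") == b"HELLO"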
| 0
| 0
|
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': AlbertModel,
'fill-mask': AlbertForMaskedLM,
'question-answering': AlbertForQuestionAnswering,
'text-classification': AlbertForSequenceClassification,
'token-classification': AlbertForTokenClassification,
'zero-shot': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 25
|
'''simple docstring'''
def twos_complement(number: int) -> str:
    """Return the two's complement of a negative integer as a binary string."""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
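    # Worked example (illustrative): for -5, bin(-5)[3:] == "101" (3 bits),
    # abs(-5) - (1 << 3) == -3 gives "11", padded with the sign bit to "1011".
    assert twos_complement(-5) == "0b1011"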
| 211
| 0
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
"configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
"tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
"GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXJapaneseForCausalLM",
"GPTNeoXJapaneseLayer",
"GPTNeoXJapaneseModel",
"GPTNeoXJapanesePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 457
|
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        inp = tokenizer("This is me", return_tensors="pt")
        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        output = model.generate(**inp)
        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )
            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
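

# The round trip exercised above, in brief (illustrative):
#
#   model = model.to_bettertransformer()       # swap modules for fused-kernel versions
#   out = model.generate(**inputs)             # run as usual
#   model = model.reverse_bettertransformer()  # restore the canonical weights
#   model.save_pretrained(path)                # only the restored model can be saved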
| 457
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = 'naver-clova-ix/donut-base-finetuned-docvqa'
    description = (
        'This is a tool that answers a question about a document (pdf). It takes an input named `document` which '
        'should be the document containing the information, as well as a `question` that is the question about the '
        'document. It returns a text that contains the answer to the question.'
    )
    name = 'document_qa'
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ['image', 'text']
    outputs = ['text']

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image.Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
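

# Illustrative usage sketch (the image path and question are placeholders, not part of
# the original module); the first call downloads the Donut checkpoint:
#
#   from PIL import Image
#   tool = DocumentQuestionAnsweringTool()
#   answer = tool(document=Image.open("invoice.png"), question="What is the total amount?")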
| 625
|
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"


def html_progress_bar(value, total, prefix, label, width=300):
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """


def text_to_html_table(items):
    "Put the texts in `items` in an HTML table."
    html_code = '<table border="1" class="dataframe">\n'
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"    <th>{i}</th>\n"
    html_code += "  </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
class snake_case_ :
'''simple docstring'''
__UpperCamelCase = 5
__UpperCamelCase = 0.2
def __init__( self, A_, A_ = None, A_ = True, A_ = None, A_ = 300, ) -> Optional[Any]:
UpperCAmelCase__ =total
UpperCAmelCase__ ="" if prefix is None else prefix
UpperCAmelCase__ =leave
UpperCAmelCase__ =parent
UpperCAmelCase__ =width
UpperCAmelCase__ =None
UpperCAmelCase__ =None
UpperCAmelCase__ =None
def __UpperCAmelCase ( self, A_, A_ = False, A_ = None ) -> Any:
UpperCAmelCase__ =value
if comment is not None:
UpperCAmelCase__ =comment
if self.last_value is None:
UpperCAmelCase__ =UpperCAmelCase__ =time.time()
UpperCAmelCase__ =UpperCAmelCase__ =value
UpperCAmelCase__ =UpperCAmelCase__ =None
UpperCAmelCase__ =self.warmup
UpperCAmelCase__ =1
self.update_bar(A_ )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total ):
if self.first_calls > 0:
self.first_calls -= 1
UpperCAmelCase__ =time.time()
UpperCAmelCase__ =current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
UpperCAmelCase__ =self.elapsed_time / (value - self.start_value)
else:
UpperCAmelCase__ =None
if value >= self.total:
UpperCAmelCase__ =self.total
UpperCAmelCase__ =None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
UpperCAmelCase__ =self.average_time_per_item * (self.total - value)
self.update_bar(A_ )
UpperCAmelCase__ =value
UpperCAmelCase__ =current_time
if self.average_time_per_item is None:
UpperCAmelCase__ =1
else:
UpperCAmelCase__ =max(int(self.update_every / self.average_time_per_item ), 1 )
def __UpperCAmelCase ( self, A_, A_=None ) -> Dict:
UpperCAmelCase__ =" " * (len(str(self.total ) ) - len(str(A_ ) )) + str(A_ )
if self.elapsed_time is None:
UpperCAmelCase__ =f"""[{spaced_value}/{self.total} : < :"""
elif self.predicted_remaining is None:
UpperCAmelCase__ =f"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )}"""
else:
UpperCAmelCase__ =(
f"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <"""
f""" {format_time(self.predicted_remaining )}"""
)
self.label += f""", {1/self.average_time_per_item:.2f} it/s"""
self.label += "]" if self.comment is None or len(self.comment ) == 0 else f""", {self.comment}]"""
self.display()
def __UpperCAmelCase ( self ) -> Tuple:
UpperCAmelCase__ =html_progress_bar(self.value, self.total, self.prefix, self.label, self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
UpperCAmelCase__ =disp.display(disp.HTML(self.html_code ), display_id=A_ )
else:
self.output.update(disp.HTML(self.html_code ) )
def __UpperCAmelCase ( self ) -> str:
if self.parent is None and self.output is not None:
self.output.update(disp.HTML("" ) )
class snake_case_ ( a ):
'''simple docstring'''
def __init__( self, A_, A_=None ) -> Dict:
super().__init__(A_ )
UpperCAmelCase__ =None if column_names is None else [column_names]
UpperCAmelCase__ =None
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase__ =html_progress_bar(self.value, self.total, self.prefix, self.label, self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
UpperCAmelCase__ =disp.display(disp.HTML(self.html_code ), display_id=A_ )
else:
self.output.update(disp.HTML(self.html_code ) )
def __UpperCAmelCase ( self, A_ ) -> Tuple:
if self.inner_table is None:
UpperCAmelCase__ =[list(values.keys() ), list(values.values() )]
else:
UpperCAmelCase__ =self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(A_ )
UpperCAmelCase__ =columns
self.inner_table.append([values[c] for c in columns] )
def __UpperCAmelCase ( self, A_, A_=None, A_=300 ) -> Union[str, Any]:
UpperCAmelCase__ =NotebookProgressBar(A_, prefix=A_, parent=self, width=A_ )
return self.child_bar
def __UpperCAmelCase ( self ) -> Tuple:
UpperCAmelCase__ =None
self.display()
class snake_case_ ( a ):
'''simple docstring'''
def __init__( self ) -> Optional[int]:
UpperCAmelCase__ =None
UpperCAmelCase__ =None
UpperCAmelCase__ =False
def __UpperCAmelCase ( self, A_, A_, A_, **A_ ) -> str:
UpperCAmelCase__ ="Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
UpperCAmelCase__ =0
UpperCAmelCase__ =0
UpperCAmelCase__ =[self.first_column] + ["Training Loss"]
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append("Validation Loss" )
UpperCAmelCase__ =NotebookTrainingTracker(state.max_steps, A_ )
def __UpperCAmelCase ( self, A_, A_, A_, **A_ ) -> str:
UpperCAmelCase__ =int(state.epoch ) if int(state.epoch ) == state.epoch else f"""{state.epoch:.2f}"""
self.training_tracker.update(
state.global_step + 1, comment=f"""Epoch {epoch}/{state.num_train_epochs}""", force_update=self._force_next_update, )
UpperCAmelCase__ =False
def __UpperCAmelCase ( self, A_, A_, A_, A_=None, **A_ ) -> int:
if not has_length(A_ ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
UpperCAmelCase__ =self.training_tracker.add_child(len(A_ ) )
else:
UpperCAmelCase__ =NotebookProgressBar(len(A_ ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def __UpperCAmelCase ( self, A_, A_, A_, **A_ ) -> Optional[Any]:
if self.prediction_bar is not None:
self.prediction_bar.close()
UpperCAmelCase__ =None
def __UpperCAmelCase ( self, A_, A_, A_, A_=None, **A_ ) -> str:
# Only for when there is no evaluation
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
UpperCAmelCase__ ={"Training Loss": logs["loss"]}
            # First column is necessarily "Step" since we're not in the epoch eval strategy
UpperCAmelCase__ =state.global_step
self.training_tracker.write_line(A_ )
def __UpperCAmelCase ( self, A_, A_, A_, A_=None, **A_ ) -> str:
if self.training_tracker is not None:
UpperCAmelCase__ ={"Training Loss": "No log", "Validation Loss": "No log"}
for log in reversed(state.log_history ):
if "loss" in log:
UpperCAmelCase__ =log["loss"]
break
if self.first_column == "Epoch":
UpperCAmelCase__ =int(state.epoch )
else:
UpperCAmelCase__ =state.global_step
UpperCAmelCase__ ="eval"
for k in metrics:
if k.endswith("_loss" ):
UpperCAmelCase__ =re.sub(R"\_loss$", "", A_ )
UpperCAmelCase__ =metrics.pop("total_flos", A_ )
UpperCAmelCase__ =metrics.pop("epoch", A_ )
UpperCAmelCase__ =metrics.pop(f"""{metric_key_prefix}_runtime""", A_ )
UpperCAmelCase__ =metrics.pop(f"""{metric_key_prefix}_samples_per_second""", A_ )
UpperCAmelCase__ =metrics.pop(f"""{metric_key_prefix}_steps_per_second""", A_ )
UpperCAmelCase__ =metrics.pop(f"""{metric_key_prefix}_jit_compilation_time""", A_ )
for k, v in metrics.items():
if k == f"""{metric_key_prefix}_loss""":
UpperCAmelCase__ =v
else:
UpperCAmelCase__ =k.split("_" )
UpperCAmelCase__ =" ".join([part.capitalize() for part in splits[1:]] )
UpperCAmelCase__ =v
self.training_tracker.write_line(A_ )
self.training_tracker.remove_child()
UpperCAmelCase__ =None
# Evaluation takes a long time so we should force the next update.
UpperCAmelCase__ =True
def __UpperCAmelCase ( self, A_, A_, A_, **A_ ) -> List[str]:
self.training_tracker.update(
state.global_step, comment=f"""Epoch {int(state.epoch )}/{state.num_train_epochs}""", force_update=A_ )
UpperCAmelCase__ =None
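

# How this module is used in practice (illustrative): transformers activates the
# notebook callback above (NotebookProgressCallback in the library source)
# automatically inside Jupyter, but it can also be registered explicitly:
#
#   from transformers import Trainer
#   trainer = Trainer(model=model, args=training_args, train_dataset=train_ds,
#                     callbacks=[NotebookProgressCallback()])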
| 625
| 1
|
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self
    def loader_batch_item(self):
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result
    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator
class KeyDataset(Dataset):
    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
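

# Illustrative usage of KeyDataset above: streaming one text column of a dataset
# through a pipeline (dataset and model names are placeholders):
#
#   from datasets import load_dataset
#   from transformers import pipeline
#
#   ds = load_dataset("imdb", split="test")
#   pipe = pipeline("sentiment-analysis")
#   for out in pipe(KeyDataset(ds, "text")):
#       print(out)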
| 707
|
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ['pixel_values']

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
def __A ( self : List[Any] , lowerCAmelCase : np.ndarray , lowerCAmelCase : Union[int, float] , lowerCAmelCase : bool = True , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : List[str] , ):
'''simple docstring'''
UpperCAmelCase_ = image.astype(np.floataa )
if offset:
UpperCAmelCase_ = image - (scale / 2)
return rescale(lowerCAmelCase , scale=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def __A ( self : Tuple , lowerCAmelCase : np.ndarray , lowerCAmelCase : Union[float, List[float]] , lowerCAmelCase : Union[float, List[float]] , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : int , ):
'''simple docstring'''
return normalize(lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def __A ( self : Union[str, Any] , lowerCAmelCase : ImageInput , lowerCAmelCase : bool = None , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : PILImageResampling = None , lowerCAmelCase : bool = None , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : bool = None , lowerCAmelCase : float = None , lowerCAmelCase : bool = None , lowerCAmelCase : bool = None , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , ):
'''simple docstring'''
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ = to_numpy_array(lowerCAmelCase )
if do_resize:
UpperCAmelCase_ = self.resize(image=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase )
if do_center_crop:
UpperCAmelCase_ = self.center_crop(lowerCAmelCase , size=lowerCAmelCase )
if do_rescale:
UpperCAmelCase_ = self.rescale(image=lowerCAmelCase , scale=lowerCAmelCase , offset=lowerCAmelCase )
if do_normalize:
UpperCAmelCase_ = self.normalize(image=lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase )
UpperCAmelCase_ = to_channel_dimension_format(lowerCAmelCase , lowerCAmelCase )
return image
def __A ( self : Optional[int] , lowerCAmelCase : ImageInput , lowerCAmelCase : bool = None , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : PILImageResampling = None , lowerCAmelCase : bool = None , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : bool = None , lowerCAmelCase : float = None , lowerCAmelCase : bool = None , lowerCAmelCase : bool = None , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : Optional[Union[str, TensorType]] = None , lowerCAmelCase : ChannelDimension = ChannelDimension.FIRST , **lowerCAmelCase : Dict , ):
'''simple docstring'''
UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ = resample if resample is not None else self.resample
UpperCAmelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = offset if offset is not None else self.offset
UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ = image_std if image_std is not None else self.image_std
UpperCAmelCase_ = size if size is not None else self.size
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
UpperCAmelCase_ = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , param_name="crop_size" )
if not valid_images(lowerCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase_ = make_batched(lowerCAmelCase )
UpperCAmelCase_ = [
[
self._preprocess_image(
image=lowerCAmelCase , do_resize=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase , do_center_crop=lowerCAmelCase , crop_size=lowerCAmelCase , do_rescale=lowerCAmelCase , rescale_factor=lowerCAmelCase , offset=lowerCAmelCase , do_normalize=lowerCAmelCase , image_mean=lowerCAmelCase , image_std=lowerCAmelCase , data_format=lowerCAmelCase , )
for img in video
]
for video in videos
]
UpperCAmelCase_ = {"pixel_values": videos}
return BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
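# A minimal usage sketch, assuming the deobfuscated keyword names of the real
# video processors (`videos`, `return_tensors`); shapes are illustrative:
#
#   import numpy as np
#   processor = __UpperCamelCase()
#   video = [np.random.randint(0, 256, (320, 480, 3), dtype=np.uint8) for _ in range(8)]
#   out = processor(video, return_tensors="np")
#   out["pixel_values"].shape  # (1, 8, 3, 224, 224) after resize to 256 / center crop to 224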
| 268
| 0
|
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
A : Tuple = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever( RagRetriever ):
'''simple docstring'''
    def __init__(self : Optional[int] , config : Optional[Any] , question_encoder_tokenizer : Tuple , generator_tokenizer : Tuple , index : Any=None ) -> Optional[int]:
        """simple docstring"""
        super().__init__(
            config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
        self.process_group = None
    def init_retrieval(self : Optional[Any] , distributed_port : int ) -> None:
        """simple docstring"""
        logger.info("""initializing retrieval""" )
        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("""dist initialized""" )
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1 )
            self.process_group = dist.new_group(ranks=None , backend="""gloo""" )
        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("""dist not initialized / main""" )
            self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group )
    def _is_main(self : Optional[int] ) -> bool:
        """simple docstring"""
        return dist.get_rank(group=self.process_group ) == 0
    def _scattered(self : List[str] , scatter_list : Union[str, Any] , target_shape : str , target_type : Tuple=torch.float32 ) -> torch.Tensor:
        """simple docstring"""
        target_tensor = torch.empty(target_shape , dtype=target_type )
        dist.scatter(target_tensor , src=0 , scatter_list=scatter_list , group=self.process_group )
        return target_tensor
    def _infer_socket_ifname(self : List[Any] ) -> Union[str, Any]:
        """simple docstring"""
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("""e""" )) , None )
        return ifname
    def retrieve(self : Dict , question_hidden_states : np.ndarray , n_docs : int ) -> Tuple[np.ndarray, List[dict]]:
        """simple docstring"""
        if not dist.is_initialized():
            doc_ids , retrieved_doc_embeds = self._main_retrieve(question_hidden_states , n_docs )
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids )
        # distributed training
        world_size = dist.get_world_size(group=self.process_group )
        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape , dtype=torch.float32 ) for _ in range(world_size )]
        dist.gather(torch.tensor(question_hidden_states ) , dst=0 , gather_list=gather_list , group=self.process_group )
        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list ) == world_size
            ids , vectors = self._main_retrieve(torch.cat(gather_list ).numpy() , n_docs )
            ids , vectors = torch.tensor(ids ), torch.tensor(vectors )
            scatter_ids = self._chunk_tensor(ids , n_queries )
            scatter_vectors = self._chunk_tensor(vectors , n_queries )
        doc_ids = self._scattered(scatter_ids , [n_queries, n_docs] , target_type=torch.int64 )
        retrieved_doc_embeds = self._scattered(scatter_vectors , [n_queries, n_docs, question_hidden_states.shape[1]] )
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids )
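# A plausible standalone sketch of the inherited `_chunk_tensor` helper used in
# `retrieve` above (hypothetical; the real method comes from the base retriever):
# slice the gathered tensor into per-worker chunks so `dist.scatter` can hand
# each process the rows for its own queries.
def _chunk_tensor_sketch(t, chunk_size):
    return [t[i : i + chunk_size] for i in range(0, len(t), chunk_size)]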
| 15
|
'''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 107
| 0
|
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class __snake_case (unittest.TestCase ):
    mod_file = inspect.getfile(accelerate.test_utils )
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_cli.py"] )
    base_cmd = ['''accelerate''', '''launch''']
    config_folder = Path.home() / '''.cache/huggingface/accelerate'''
    config_file = '''default_config.yaml'''
    config_path = config_folder / config_file
    changed_path = config_folder / '''_default_config.yaml'''
    test_config_path = Path("tests/test_configs" )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : List[str] ) -> str:
'''simple docstring'''
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Any ) -> str:
'''simple docstring'''
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
        cmd = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
for config in sorted(self.test_config_path.glob("""**/*.yaml""" ) ):
            with self.subTest(config_file=config ):
                execute_subprocess_async(
                    self.base_cmd + ["""--config_file""", str(config ), self.test_file_path] , env=os.environ.copy() )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
execute_subprocess_async(["""accelerate""", """test"""] , env=os.environ.copy() )
class __snake_case (unittest.TestCase ):
    tpu_name = '''test-tpu'''
    tpu_zone = '''us-central1-a'''
    command = '''ls'''
    cmd = ['''accelerate''', '''tpu-config''']
    base_output = '''cd /usr/share'''
    command_file = '''tests/test_samples/test_command_file.sh'''
    gcloud = '''Running gcloud compute tpus tpu-vm ssh'''
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
'''simple docstring'''
_lowerCAmelCase : Any = run_command(
self.cmd
+ ["""--command""", self.command, """--tpu_zone""", self.tpu_zone, """--tpu_name""", self.tpu_name, """--debug"""] , return_stdout=a_ , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all" , a_ , )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
'''simple docstring'''
_lowerCAmelCase : Tuple = run_command(
self.cmd
+ [
"""--config_file""",
"""tests/test_configs/0_12_0.yaml""",
"""--command""",
self.command,
"""--tpu_zone""",
self.tpu_zone,
"""--tpu_name""",
self.tpu_name,
"""--debug""",
] , return_stdout=a_ , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all" , a_ , )
def SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = run_command(
self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--debug"""] , return_stdout=a_ )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all" , a_ , )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
_lowerCAmelCase : Any = run_command(
self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--command""", self.command, """--debug"""] , return_stdout=a_ , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all" , a_ , )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
_lowerCAmelCase : List[Any] = run_command(
self.cmd
+ [
"""--config_file""",
"""tests/test_configs/latest.yaml""",
"""--command""",
self.command,
"""--command""",
"""echo \"Hello World\"""",
"""--debug""",
] , return_stdout=a_ , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all" , a_ , )
def SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
'''simple docstring'''
_lowerCAmelCase : List[str] = run_command(
self.cmd
+ ["""--config_file""", """tests/test_configs/latest.yaml""", """--command_file""", self.command_file, """--debug"""] , return_stdout=a_ , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all" , a_ , )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
'''simple docstring'''
_lowerCAmelCase : Dict = run_command(
self.cmd
+ [
"""--config_file""",
"""tests/test_configs/0_12_0.yaml""",
"""--command_file""",
self.command_file,
"""--tpu_zone""",
self.tpu_zone,
"""--tpu_name""",
self.tpu_name,
"""--debug""",
] , return_stdout=a_ , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all" , a_ , )
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
'''simple docstring'''
_lowerCAmelCase : str = run_command(
self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--install_accelerate""", """--debug"""] , return_stdout=a_ , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all" , a_ , )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
'''simple docstring'''
_lowerCAmelCase : Any = run_command(
self.cmd
+ [
"""--config_file""",
"""tests/test_configs/latest.yaml""",
"""--install_accelerate""",
"""--accelerate_version""",
"""12.0.0""",
"""--debug""",
] , return_stdout=a_ , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all" , a_ , )
| 707
|
def prefix_function(input_string: str ) -> list:
    """
    Compute the Knuth-Morris-Pratt prefix function: for every position i, the
    length of the longest proper prefix of input_string[: i + 1] that is also
    a suffix of it.

    >>> prefix_function("aabcdaabc")
    [0, 1, 0, 0, 0, 1, 2, 3, 4]
    >>> prefix_function("asdasdad")
    [0, 0, 0, 1, 2, 3, 4, 0]
    """
    prefix_result = [0] * len(input_string )
    for i in range(1 , len(input_string ) ):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result
def longest_prefix(input_string: str ) -> int:
    """
    Return the length of the longest prefix that also occurs again in the
    string (the maximum value of the prefix function).

    >>> longest_prefix("aabcdaabc")
    4
    >>> longest_prefix("asdasdad")
    4
    """
    return max(prefix_function(input_string ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 196
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
lowerCamelCase__ = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    freeze_encoder: bool = field(default=False , metadata={"help": "Whether to freeze the encoder."} )
    freeze_embeds: bool = field(default=False , metadata={"help": "Whether to freeze the embeddings."} )
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
    task: Optional[str] = field(
        default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , )
    max_source_length: Optional[int] = field(
        default=1024 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    max_target_length: Optional[int] = field(
        default=128 , metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    val_max_target_length: Optional[int] = field(
        default=142 , metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        } , )
    test_max_target_length: Optional[int] = field(
        default=142 , metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    n_train: Optional[int] = field(default=-1 , metadata={"help": "# training examples. -1 means use all."} )
    n_val: Optional[int] = field(default=-1 , metadata={"help": "# validation examples. -1 means use all."} )
    n_test: Optional[int] = field(default=-1 , metadata={"help": "# test examples. -1 means use all."} )
    src_lang: Optional[str] = field(default=None , metadata={"help": "Source language id for translation."} )
    tgt_lang: Optional[str] = field(default=None , metadata={"help": "Target language id for translation."} )
    eval_beams: Optional[int] = field(default=None , metadata={"help": "# num_beams to use for evaluation."} )
    ignore_pad_token_for_loss: bool = field(
        default=True , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , )
def handle_metrics(split , metrics , output_dir ) -> None:
    """simple docstring"""
    logger.info(F'***** {split} metrics *****' )
    for key in sorted(metrics.keys() ):
        logger.info(F' {key} = {metrics[key]}' )
    save_json(metrics , os.path.join(output_dir , F'{split}_results.json' ) )
def main() -> Any:
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , lowerCAmelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_UpperCamelCase = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
for p in extra_model_params:
if getattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
assert hasattr(lowerCAmelCase , lowerCAmelCase ), F'({config.__class__.__name__}) doesn\'t have a `{p}` attribute'
setattr(lowerCAmelCase , lowerCAmelCase , getattr(lowerCAmelCase , lowerCAmelCase ) )
_UpperCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=lowerCAmelCase , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(lowerCAmelCase , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
_UpperCamelCase = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(lowerCAmelCase , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_UpperCamelCase = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
_UpperCamelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(lowerCAmelCase )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
_UpperCamelCase = SeqaSeqDataset
# Get datasets
_UpperCamelCase = (
dataset_class(
lowerCAmelCase , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_train
else None
)
_UpperCamelCase = (
dataset_class(
lowerCAmelCase , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
_UpperCamelCase = (
dataset_class(
lowerCAmelCase , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
_UpperCamelCase = (
build_compute_metrics_fn(data_args.task , lowerCAmelCase ) if training_args.predict_with_generate else None
)
_UpperCamelCase = SeqaSeqTrainer(
model=lowerCAmelCase , args=lowerCAmelCase , data_args=lowerCAmelCase , train_dataset=lowerCAmelCase , eval_dataset=lowerCAmelCase , data_collator=SeqaSeqDataCollator(
lowerCAmelCase , lowerCAmelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=lowerCAmelCase , tokenizer=lowerCAmelCase , )
_UpperCamelCase = {}
# Training
if training_args.do_train:
logger.info("""*** Train ***""" )
_UpperCamelCase = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
_UpperCamelCase = train_result.metrics
_UpperCamelCase = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("""train""" , lowerCAmelCase , training_args.output_dir )
all_metrics.update(lowerCAmelCase )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_UpperCamelCase = trainer.evaluate(metric_key_prefix="""val""" )
_UpperCamelCase = data_args.n_val
_UpperCamelCase = round(metrics["""val_loss"""] , 4 )
if trainer.is_world_process_zero():
handle_metrics("""val""" , lowerCAmelCase , training_args.output_dir )
all_metrics.update(lowerCAmelCase )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
_UpperCamelCase = trainer.predict(test_dataset=lowerCAmelCase , metric_key_prefix="""test""" )
_UpperCamelCase = test_output.metrics
_UpperCamelCase = data_args.n_test
if trainer.is_world_process_zero():
_UpperCamelCase = round(metrics["""test_loss"""] , 4 )
handle_metrics("""test""" , lowerCAmelCase , training_args.output_dir )
all_metrics.update(lowerCAmelCase )
if training_args.predict_with_generate:
_UpperCamelCase = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase )
_UpperCamelCase = lmap(str.strip , lowerCAmelCase )
write_txt_file(lowerCAmelCase , os.path.join(training_args.output_dir , """test_generations.txt""" ) )
if trainer.is_world_process_zero():
save_json(lowerCAmelCase , os.path.join(training_args.output_dir , """all_results.json""" ) )
return all_metrics
def _mp_fn(index ) -> None:
    """simple docstring"""
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
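# Illustrative invocation of this script (paths, model name, and hyperparameters
# are hypothetical, not taken from the source):
#
#   python finetune_trainer.py \
#       --model_name_or_path facebook/bart-base \
#       --data_dir ./wmt_en_ro --output_dir ./outputs \
#       --task translation --do_train --do_eval \
#       --predict_with_generate --n_val 500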
| 612
|
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__ ( SchedulerCommonTest ):
UpperCamelCase_ : int = (KDPMaDiscreteScheduler,)
UpperCamelCase_ : Optional[int] = 10
    def get_scheduler_config( self , **kwargs ) -> dict:
        '''simple docstring'''
        config = {
            """num_train_timesteps""": 11_00,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
        }
        config.update(**kwargs )
        return config
def A_ ( self ) -> List[str]:
'''simple docstring'''
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=a )
def A_ ( self ) -> List[Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=a , beta_end=a )
def A_ ( self ) -> Any:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=a )
def A_ ( self ) -> Optional[int]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a )
def A_ ( self ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config(prediction_type="""v_prediction""" )
_UpperCamelCase = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps )
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(a )
for i, t in enumerate(scheduler.timesteps ):
_UpperCamelCase = scheduler.scale_model_input(a , a )
_UpperCamelCase = model(a , a )
_UpperCamelCase = scheduler.step(a , a , a )
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(a ) )
_UpperCamelCase = torch.mean(torch.abs(a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6934e-07 ) < 1e-2
assert abs(result_mean.item() - 6.1112e-10 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.693_4286_5017_0972e-07 ) < 1e-2
assert abs(result_mean.item() - 0.0002 ) < 1e-3
def A_ ( self ) -> str:
'''simple docstring'''
if torch_device == "mps":
return
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps )
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(a )
for i, t in enumerate(scheduler.timesteps ):
_UpperCamelCase = scheduler.scale_model_input(a , a )
_UpperCamelCase = model(a , a )
_UpperCamelCase = scheduler.step(a , a , a )
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(a ) )
_UpperCamelCase = torch.mean(torch.abs(a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4125 ) < 1e-2
assert abs(result_mean.item() - 0.0266 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1e-2
assert abs(result_mean.item() - 0.0266 ) < 1e-3
def A_ ( self ) -> str:
'''simple docstring'''
if torch_device == "mps":
return
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps , device=a )
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter.to(a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_UpperCamelCase = scheduler.scale_model_input(a , a )
_UpperCamelCase = model(a , a )
_UpperCamelCase = scheduler.step(a , a , a )
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(a ) )
_UpperCamelCase = torch.mean(torch.abs(a ) )
if str(a ).startswith("""cpu""" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4125 ) < 1e-2
assert abs(result_mean.item() - 0.0266 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1e-2
assert abs(result_mean.item() - 0.0266 ) < 1e-3
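# The sampling pattern exercised by the tests above, distilled (scheduler and
# model here are stand-ins for the objects constructed in each test):
#
#   scheduler.set_timesteps(num_inference_steps)
#   sample = initial_noise * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       model_output = model(model_input, t)
#       sample = scheduler.step(model_output, t, sample).prev_sample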
| 612
| 1
|
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case :Optional[Any] = logging.get_logger(__name__)
__snake_case :Any = {
'''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
'''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
'''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}
class OwlViTTextConfig( PretrainedConfig ):
UpperCamelCase__ : List[str] = '''owlvit_text_model'''
def __init__( self : str , __SCREAMING_SNAKE_CASE : Dict=49_408 , __SCREAMING_SNAKE_CASE : Optional[int]=512 , __SCREAMING_SNAKE_CASE : str=2_048 , __SCREAMING_SNAKE_CASE : int=12 , __SCREAMING_SNAKE_CASE : List[str]=8 , __SCREAMING_SNAKE_CASE : Dict=16 , __SCREAMING_SNAKE_CASE : Union[str, Any]="quick_gelu" , __SCREAMING_SNAKE_CASE : Optional[Any]=1E-5 , __SCREAMING_SNAKE_CASE : List[str]=0.0 , __SCREAMING_SNAKE_CASE : str=0.02 , __SCREAMING_SNAKE_CASE : List[Any]=1.0 , __SCREAMING_SNAKE_CASE : Dict=0 , __SCREAMING_SNAKE_CASE : Tuple=49_406 , __SCREAMING_SNAKE_CASE : List[Any]=49_407 , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
__a = vocab_size
__a = hidden_size
__a = intermediate_size
__a = num_hidden_layers
__a = num_attention_heads
__a = max_position_embeddings
__a = hidden_act
__a = layer_norm_eps
__a = attention_dropout
__a = initializer_range
__a = initializer_factor
@classmethod
def _lowerCamelCase ( cls : Tuple , __SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , **__SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
cls._set_token_in_kwargs(__SCREAMING_SNAKE_CASE)
__a , __a = cls.get_config_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''') == "owlvit":
__a = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
return cls.from_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
class OwlViTVisionConfig( PretrainedConfig ):
UpperCamelCase__ : List[Any] = '''owlvit_vision_model'''
def __init__( self : str , __SCREAMING_SNAKE_CASE : Union[str, Any]=768 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3_072 , __SCREAMING_SNAKE_CASE : List[Any]=12 , __SCREAMING_SNAKE_CASE : Any=12 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3 , __SCREAMING_SNAKE_CASE : str=768 , __SCREAMING_SNAKE_CASE : Optional[Any]=32 , __SCREAMING_SNAKE_CASE : Union[str, Any]="quick_gelu" , __SCREAMING_SNAKE_CASE : List[str]=1E-5 , __SCREAMING_SNAKE_CASE : str=0.0 , __SCREAMING_SNAKE_CASE : List[str]=0.02 , __SCREAMING_SNAKE_CASE : Optional[int]=1.0 , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ):
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE)
__a = hidden_size
__a = intermediate_size
__a = num_hidden_layers
__a = num_attention_heads
__a = num_channels
__a = image_size
__a = patch_size
__a = hidden_act
__a = layer_norm_eps
__a = attention_dropout
__a = initializer_range
__a = initializer_factor
@classmethod
def _lowerCamelCase ( cls : Tuple , __SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , **__SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
cls._set_token_in_kwargs(__SCREAMING_SNAKE_CASE)
__a , __a = cls.get_config_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''') == "owlvit":
__a = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
return cls.from_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
class OwlViTConfig( PretrainedConfig ):
UpperCamelCase__ : List[Any] = '''owlvit'''
UpperCamelCase__ : Dict = True
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Dict=512 , __SCREAMING_SNAKE_CASE : str=2.65_92 , __SCREAMING_SNAKE_CASE : Dict=True , **__SCREAMING_SNAKE_CASE : Optional[Any] , ):
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE)
if text_config is None:
__a = {}
logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''')
if vision_config is None:
__a = {}
logger.info('''vision_config is None. initializing the OwlViTVisionConfig with default values.''')
__a = OwlViTTextConfig(**__SCREAMING_SNAKE_CASE)
__a = OwlViTVisionConfig(**__SCREAMING_SNAKE_CASE)
__a = projection_dim
__a = logit_scale_init_value
__a = return_dict
__a = 1.0
@classmethod
def _lowerCamelCase ( cls : str , __SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , **__SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
cls._set_token_in_kwargs(__SCREAMING_SNAKE_CASE)
__a , __a = cls.get_config_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
return cls.from_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
@classmethod
def _lowerCamelCase ( cls : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict , **__SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
__a = {}
__a = text_config
__a = vision_config
return cls.from_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = copy.deepcopy(self.__dict__)
__a = self.text_config.to_dict()
__a = self.vision_config.to_dict()
__a = self.__class__.model_type
return output
class OwlViTOnnxConfig( OnnxConfig ):
@property
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
])
@property
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
return OrderedDict(
[
('''logits_per_image''', {0: '''batch'''}),
('''logits_per_text''', {0: '''batch'''}),
('''text_embeds''', {0: '''batch'''}),
('''image_embeds''', {0: '''batch'''}),
])
@property
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
return 1E-4
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : "ProcessorMixin" , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : Optional["TensorType"] = None , ):
'''simple docstring'''
__a = super().generate_dummy_inputs(
processor.tokenizer , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE)
__a = super().generate_dummy_inputs(
processor.image_processor , batch_size=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE)
return {**text_input_dict, **image_input_dict}
@property
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
return 14
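# Minimal usage sketch, assuming the deobfuscated keyword names of the real
# OwlViT configuration (`text_config` / `vision_config` passed as plain dicts;
# the obfuscated __init__ above does not expose these names directly):
#
#   config = OwlViTConfig(
#       text_config={"vocab_size": 49_408, "hidden_size": 512},
#       vision_config={"hidden_size": 768, "patch_size": 32},
#   )
#   config.text_config.hidden_size, config.vision_config.patch_size  # 512, 32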
| 60
|
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ''''''
if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''):
    class SentencesToListOfCharacters( tr.AbstractTransform ):
        def __init__( self : List[Any] , sentence_delimiter : str = " "):
            '''simple docstring'''
            self.sentence_delimiter = sentence_delimiter
        def process_string( self : Dict , s : str):
            '''simple docstring'''
            return list(s)
        def process_list( self : int , inp : List[str]):
            '''simple docstring'''
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars
    cer_transform = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
    cer_transform = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
_CITATION = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
_DESCRIPTION = '''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
'''
_KWARGS_DESCRIPTION = '''
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> cer = datasets.load_metric("cer")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
    def _info( self : Optional[Any]):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
'''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
] , )
    def _compute( self : Optional[int] , predictions : str , references : List[Any] , concatenate_texts : Dict=False):
        '''simple docstring'''
        if concatenate_texts:
            return jiwer.compute_measures(
                references , predictions , truth_transform=cer_transform , hypothesis_transform=cer_transform , )["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions , references):
            measures = jiwer.compute_measures(
                reference , prediction , truth_transform=cer_transform , hypothesis_transform=cer_transform , )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
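# Worked example of the formula in the docstring above: with reference "kitten"
# and prediction "sitting" there are S=2 substitutions (k→s, e→i), D=0
# deletions, I=1 insertion (the final g), and N=6 reference characters, so
# CER = (2 + 0 + 1) / 6 = 0.5.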
| 60
| 1
|
"""simple docstring"""
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
_A : List[str] = True
from torch.cuda.amp import autocast
_A : Optional[int] = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""}, )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} )
    verbose_logging: Optional[bool] = field(
        default=False, metadata={"""help""": """Whether to log verbose messages or not."""}, )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"""help""": """Maximum temperature for gumbel softmax."""} )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"""help""": """Minimum temperature for gumbel softmax."""} )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"""help""": """Decay of gumbel temperature during training."""} )
def configure_logger(model_args: ModelArguments , training_args: TrainingArguments ) -> None:
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank ):
        logging_level = logging.INFO
    logger.setLevel(logging_level )
@dataclass
class DataTrainingArguments:
    dataset_name: str = field(
        default=None, metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
    train_split_name: Optional[str] = field(
        default="""train""", metadata={
            """help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'"""
        }, )
    validation_split_name: Optional[str] = field(
        default="""validation""", metadata={
            """help""": (
                """The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"""
            )
        }, )
    speech_file_column: Optional[str] = field(
        default="""file""", metadata={"""help""": """Column in the dataset that contains speech file path. Defaults to 'file'"""}, )
    overwrite_cache: bool = field(
        default=False, metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
    validation_split_percentage: Optional[int] = field(
        default=1, metadata={
            """help""": """The percentage of the train set used as validation set in case there's no validation split"""
        }, )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"""help""": """The number of processes to use for the preprocessing."""}, )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"""help""": """Filter audio files that are longer than `max_duration_in_seconds` seconds"""} )
@dataclass
class DataCollatorForWavaVecaPretraining:
    model: WavaVecaForPreTraining
    feature_extractor: WavaVecaFeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None
    def __call__( self , features ):
        batch = self.feature_extractor.pad(
            features , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1] )
        batch_size = batch["input_values"].shape[0]
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1 ) ).to(
                torch.long )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch["input_values"].device )
            # these two operations make sure that all values
            # before the output lengths indices are attended to
            # (the index assignment below is a reconstruction of a line lost to obfuscation)
            attention_mask[(torch.arange(attention_mask.shape[0] , device=batch["input_values"].device ), output_lengths - 1)] = 1
            attention_mask = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=attention_mask , min_masks=2 , )
        return batch
class WavaVecaPreTrainer( Trainer ):
    def __init__( self , *args , max_gumbel_temp=1 , min_gumbel_temp=0 , gumbel_temp_decay=1.0 , **kwargs ):
        super().__init__(*args , **kwargs )
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay
    def training_step( self , model , inputs ):
        model.train()
        inputs = self._prepare_inputs(inputs )
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model , inputs )
        else:
            loss = self.compute_loss(model , inputs )
        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss ).backward()
        elif self.use_apex:
            with amp.scale_loss(loss , self.optimizer ) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss )
        else:
            loss.backward()
        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
        return loss.detach()
def __magic_name__ ( ) -> str:
lowercase : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowercase , lowercase , lowercase : Union[str, Any] = parser.parse_args_into_dataclasses()
configure_logger(__snake_case , __snake_case )
# Downloading and loading a dataset from the hub.
lowercase : Union[str, Any] = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
# make sure only "validation" and "train" keys remain"
lowercase : List[Any] = DatasetDict()
lowercase : Optional[int] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f"""{data_args.train_split_name}[:{data_args.validation_split_percentage}%]""" , cache_dir=model_args.cache_dir , )
lowercase : List[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f"""{data_args.train_split_name}[{data_args.validation_split_percentage}%:]""" , cache_dir=model_args.cache_dir , )
else:
# make sure only "validation" and "train" keys remain"
lowercase : int = DatasetDict()
lowercase : Optional[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split="validation" , cache_dir=model_args.cache_dir , )
lowercase : Any = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f"""{data_args.train_split_name}""" , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
lowercase : Tuple = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=__snake_case )
def prepare_dataset(__snake_case : Optional[Any] ):
# check that all files have the correct sampling rate
lowercase , lowercase : Optional[int] = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
lowercase : List[Any] = datasets.map(
__snake_case , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["train"].column_names )
# filter audio files that are too long
lowercase : Union[str, Any] = vectorized_datasets.filter(
lambda __snake_case : len(data["speech"] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(__snake_case : Any ):
return feature_extractor(batch["speech"] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
lowercase : str = vectorized_datasets.map(
__snake_case , batched=__snake_case , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["train"].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
lowercase : int = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
"PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
" ``config.feat_extract_norm=\'layer\'" )
lowercase : List[Any] = WavaVecaForPreTraining(__snake_case )
lowercase : List[str] = DataCollatorForWavaVecaPretraining(model=__snake_case , feature_extractor=__snake_case )
lowercase : Any = WavaVecaPreTrainer(
model=__snake_case , data_collator=__snake_case , args=__snake_case , train_dataset=vectorized_datasets["train"] , eval_dataset=vectorized_datasets["validation"] , tokenizer=__snake_case , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
| 361
|
'''simple docstring'''
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential: building the model layer by layer)
_lowercase : Optional[int] = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation="""relu""")
)
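# Shape sketch: with Keras' default 'valid' padding, a 3x3 kernel over
# 64x64x3 inputs yields 62x62x32 feature maps (64 - 3 + 1 = 62 per axis).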
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(32, (3, 3), activation="""relu"""))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation="""relu"""))
classifier.add(layers.Dense(units=1, activation="""sigmoid"""))
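# The single sigmoid unit emits one probability for the positive class,
# the standard pairing for the binary_crossentropy loss configured below.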
# Compiling the CNN
classifier.compile(
optimizer="""adam""", loss="""binary_crossentropy""", metrics=["""accuracy"""]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
_lowercase : Optional[Any] = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
_lowercase : Tuple = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
_lowercase : Any = train_datagen.flow_from_directory(
"""dataset/training_set""", target_size=(64, 64), batch_size=32, class_mode="""binary"""
)
_lowercase : List[str] = test_datagen.flow_from_directory(
"""dataset/test_set""", target_size=(64, 64), batch_size=32, class_mode="""binary"""
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save("""cnn.h5""")
# Part 3 - Making new predictions
_lowercase : Dict = tf.keras.preprocessing.image.load_img(
"""dataset/single_prediction/image.png""", target_size=(64, 64)
)
_lowercase : int = tf.keras.preprocessing.image.img_to_array(test_image)
_lowercase : int = np.expand_dims(test_image, axis=0)
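# predict expects a batch axis, so the single image goes from (64, 64, 3)
# to (1, 64, 64, 3) before being fed to the network.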
_lowercase : str = classifier.predict(test_image)
# training_set.class_indices
# sigmoid output is a probability, so threshold at 0.5 rather than testing
# for exact equality with 0 or 1
if result[0][0] < 0.5:
_lowercase : Optional[int] = """Normal"""
else:
_lowercase : Union[str, Any] = """Abnormality detected"""
| 210
| 0
|
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs :
'''simple docstring'''
def forward( self :Optional[int] , input_ids :Union[str, Any] , token_type_ids :List[Any] , attention_mask :List[Any] ) -> Union[str, Any]:
return None
class FuncNonContiguousArgs :
'''simple docstring'''
def forward( self :int , input_ids :Optional[Any] , some_other_args :str , token_type_ids :Dict , attention_mask :Optional[Any] ) -> List[str]:
return None
class OnnxExportTestCase ( unittest.TestCase ):
'''simple docstring'''
MODEL_TO_TEST : Any = [
# (model_name, model_kwargs)
('''bert-base-cased''', {}),
('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def __magic_name__( self :Optional[Any] ) -> Any:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCAmelCase__ , '''tf''' , 12 , **lowerCAmelCase__ )
@require_torch
@slow
def __magic_name__( self :Any ) -> Optional[Any]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCAmelCase__ , '''pt''' , 12 , **lowerCAmelCase__ )
@require_torch
@slow
def __magic_name__( self :Dict ) -> Union[str, Any]:
from transformers import BertModel
__SCREAMING_SNAKE_CASE : str = ['''[UNK]''', '''[SEP]''', '''[CLS]''', '''[PAD]''', '''[MASK]''', '''some''', '''other''', '''words''']
with NamedTemporaryFile(mode='''w+t''' ) as vocab_file:
vocab_file.write('''\n'''.join(lowerCAmelCase__ ) )
vocab_file.flush()
__SCREAMING_SNAKE_CASE : List[str] = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
__SCREAMING_SNAKE_CASE : Tuple = BertModel(BertConfig(vocab_size=len(lowerCAmelCase__ ) ) )
model.save_pretrained(lowerCAmelCase__ )
self._test_export(lowerCAmelCase__ , '''pt''' , 12 , lowerCAmelCase__ )
@require_tf
@slow
def __magic_name__( self :int ) -> List[Any]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
__SCREAMING_SNAKE_CASE : Optional[int] = self._test_export(lowerCAmelCase__ , '''tf''' , 12 , **lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = quantize(Path(lowerCAmelCase__ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCAmelCase__ ).stat().st_size:
self.fail('''Quantized model is bigger than initial ONNX model''' )
@require_torch
@slow
def __magic_name__( self :str ) -> Dict:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
__SCREAMING_SNAKE_CASE : int = self._test_export(lowerCAmelCase__ , '''pt''' , 12 , **lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = quantize(lowerCAmelCase__ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCAmelCase__ ).stat().st_size:
self.fail('''Quantized model is bigger than initial ONNX model''' )
def __magic_name__( self :int , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :int=None , **lowerCAmelCase__ :Optional[int] ) -> Union[str, Any]:
try:
# Compute path
with TemporaryDirectory() as tempdir:
__SCREAMING_SNAKE_CASE : Optional[Any] = Path(lowerCAmelCase__ ).joinpath('''model.onnx''' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
return path
except Exception as e:
self.fail(lowerCAmelCase__ )
@require_torch
@require_tokenizers
@slow
def __magic_name__( self :int ) -> Tuple:
from transformers import BertModel
__SCREAMING_SNAKE_CASE : List[Any] = BertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
__SCREAMING_SNAKE_CASE : Any = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
self._test_infer_dynamic_axis(lowerCAmelCase__ , lowerCAmelCase__ , '''pt''' )
@require_tf
@require_tokenizers
@slow
def __magic_name__( self :Union[str, Any] ) -> Optional[Any]:
from transformers import TFBertModel
__SCREAMING_SNAKE_CASE : str = TFBertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
__SCREAMING_SNAKE_CASE : str = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
self._test_infer_dynamic_axis(lowerCAmelCase__ , lowerCAmelCase__ , '''tf''' )
def __magic_name__( self :Any , lowerCAmelCase__ :Any , lowerCAmelCase__ :Dict , lowerCAmelCase__ :str ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : List[str] = FeatureExtractionPipeline(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''output_0''', '''output_1''']
__SCREAMING_SNAKE_CASE : Tuple = infer_shapes(lowerCAmelCase__ , lowerCAmelCase__ )
# Assert all variables are present
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , lowerCAmelCase__ )
self.assertSequenceEqual(variable_names[3:] , lowerCAmelCase__ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: '''batch''', 1: '''sequence'''} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['''output_0'''] , {0: '''batch''', 1: '''sequence'''} )
self.assertDictEqual(shapes['''output_1'''] , {0: '''batch'''} )
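# The dynamic axes make the exported ONNX graph shape-agnostic: inputs and
# the sequence output vary over {batch, sequence}, while the pooled
# output_1 varies only over the batch dimension.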
def __magic_name__( self :Any ) -> str:
__SCREAMING_SNAKE_CASE : str = ['''input_ids''', '''attention_mask''', '''token_type_ids''']
__SCREAMING_SNAKE_CASE : Any = {'''input_ids''': [1, 2, 3, 4], '''attention_mask''': [0, 0, 0, 0], '''token_type_ids''': [1, 1, 1, 1]}
__SCREAMING_SNAKE_CASE : Any = ensure_valid_input(FuncContiguousArgs() , lowerCAmelCase__ , lowerCAmelCase__ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(lowerCAmelCase__ ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(lowerCAmelCase__ ) , set(lowerCAmelCase__ ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(lowerCAmelCase__ , (tokens['''input_ids'''], tokens['''token_type_ids'''], tokens['''attention_mask''']) )
# Generated args are interleaved with other args (for instance the "past" parameter in GPT2)
__SCREAMING_SNAKE_CASE : Optional[Any] = ensure_valid_input(FuncNonContiguousArgs() , lowerCAmelCase__ , lowerCAmelCase__ )
# Should have exactly one arg (everything before the first argument that was not provided, "some_other_args")
self.assertEqual(len(lowerCAmelCase__ ) , 1 )
self.assertEqual(len(lowerCAmelCase__ ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens['''input_ids'''] )
self.assertEqual(ordered_input_names[0] , '''input_ids''' )
def __magic_name__( self :List[Any] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : int = generate_identified_filename(Path('''/home/something/my_fake_model.onnx''' ) , '''-test''' )
self.assertEqual('''/home/something/my_fake_model-test.onnx''' , generated.as_posix() )
| 704
|
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def get_file_format( self :Union[str, Any] , seed :str , shape :int ) -> str:
return f'''gaussian_noise_s={seed}_shape={'_'.join([str(s ) for s in shape] )}.npy'''
def tearDown( self :List[str] ) -> None:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def get_latents( self :Optional[Any] , seed :int=0 , shape :Any=(4, 4, 64, 64) , fpaa :List[Any]=False ) -> List[str]:
dtype = jnp.bfloataa if fpaa else jnp.floataa
image = jnp.array(load_hf_numpy(self.get_file_format(seed , shape ) ) , dtype=dtype )
return image
def get_unet_model( self :Tuple , fpaa :Tuple=False , model_id :int="CompVis/stable-diffusion-v1-4" ) -> Tuple:
dtype = jnp.bfloataa if fpaa else jnp.floataa
revision = '''bf16''' if fpaa else None
model , params = FlaxUNetaDConditionModel.from_pretrained(
model_id , subfolder='''unet''' , dtype=dtype , revision=revision )
return model, params
def get_encoder_hidden_states( self :Any , seed :str=0 , shape :Optional[int]=(4, 77, 768) , fpaa :str=False ) -> Optional[Any]:
dtype = jnp.bfloataa if fpaa else jnp.floataa
hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed , shape ) ) , dtype=dtype )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1_000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def __magic_name__( self :str , lowerCAmelCase__ :str , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[int] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = self.get_latents(lowerCAmelCase__ , fpaa=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = self.get_encoder_hidden_states(lowerCAmelCase__ , fpaa=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = model.apply(
{'''params''': params} , lowerCAmelCase__ , jnp.array(lowerCAmelCase__ , dtype=jnp.intaa ) , encoder_hidden_states=lowerCAmelCase__ , ).sample
assert sample.shape == latents.shape
__SCREAMING_SNAKE_CASE : int = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
__SCREAMING_SNAKE_CASE : Tuple = jnp.array(lowerCAmelCase__ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1_000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :str ) -> str:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = self.get_latents(lowerCAmelCase__ , shape=(4, 4, 96, 96) , fpaa=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = self.get_encoder_hidden_states(lowerCAmelCase__ , shape=(4, 77, 1_024) , fpaa=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = model.apply(
{'''params''': params} , lowerCAmelCase__ , jnp.array(lowerCAmelCase__ , dtype=jnp.intaa ) , encoder_hidden_states=lowerCAmelCase__ , ).sample
assert sample.shape == latents.shape
__SCREAMING_SNAKE_CASE : Optional[Any] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
__SCREAMING_SNAKE_CASE : str = jnp.array(lowerCAmelCase__ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-2 )
| 260
| 0
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCamelCase__ ( metaclass=__lowerCAmelCase ):
lowerCAmelCase__ : Optional[int] = ["transformers", "torch", "note_seq"]
def __init__( self : str , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : Optional[int] ):
'''simple docstring'''
requires_backends(self , ["transformers", "torch", "note_seq"] )
@classmethod
def __a ( cls : Optional[int] , *lowerCamelCase : int , **lowerCamelCase : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["transformers", "torch", "note_seq"] )
@classmethod
def __a ( cls : Any , *lowerCamelCase : int , **lowerCamelCase : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["transformers", "torch", "note_seq"] )
| 489
|
'''simple docstring'''
import sys
lowerCAmelCase_ : List[str] = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution(n : str = lowerCAmelCase_ ) -> int:
largest_product = -sys.maxsize - 1
for i in range(len(n ) - 12 ):
product = 1
for j in range(13 ):
product *= int(n[i + j] )
if product > largest_product:
largest_product = product
return largest_product
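# Worked example: the 1_000-digit input above allows 1_000 - 12 = 988
# starting positions for a window of 13 adjacent digits.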
if __name__ == "__main__":
print(f"""{solution() = }""")
| 489
| 1
|
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
lowerCAmelCase__ = logging.get_logger(__name__)
def run_with_tf_optimizations ( do_eager_mode , use_xla ) -> Any:
'''simple docstring'''
def run_func(func ):
@wraps(func )
def run_in_eager_mode(*args , **kwargs ):
return func(*args , **kwargs )
@wraps(func )
@tf.function(experimental_compile=use_xla )
def run_in_graph_mode(*args , **kwargs ):
return func(*args , **kwargs )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
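# run_with_tf_optimizations is a decorator factory: it returns the function
# unchanged for eager-mode benchmarking, or wrapped in tf.function
# (optionally XLA-compiled via experimental_compile) for graph mode.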
def random_input_ids ( vocab_size , batch_size , sequence_length ) -> ["tf.Tensor"]:
'''simple docstring'''
rng = random.Random()
values = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(values , shape=(batch_size, sequence_length) , dtype=tf.intaa )
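# e.g. random_input_ids(vocab_size=100, batch_size=2, sequence_length=8)
# returns an integer tensor of shape (2, 8) with ids drawn uniformly from
# [0, 99] (numbers here are illustrative).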
class snake_case ( __snake_case ):
"""simple docstring"""
__lowerCAmelCase = 42
__lowerCAmelCase = 42
__lowerCAmelCase = """TensorFlow"""
@property
def snake_case__ ( self ):
return tf.__version__
def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
# initialize GPU on a separate process
__lowercase = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
__lowercase = self._prepare_inference_func(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return self._measure_speed(_inference )
def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
__lowercase = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
__lowercase = self._prepare_train_func(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return self._measure_speed(_train )
def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
# initialize GPU on a separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCAmelCase_ )
__lowercase = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
__lowercase = self._prepare_inference_func(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return self._measure_memory(_inference )
def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCAmelCase_ )
__lowercase = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
__lowercase = self._prepare_train_func(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return self._measure_memory(_train )
def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
__lowercase = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
__lowercase = (
hasattr(lowerCAmelCase_ , "architectures" )
and isinstance(config.architectures , lowerCAmelCase_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
__lowercase = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
__lowercase = __import__("transformers" , fromlist=[model_class] )
__lowercase = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
__lowercase = model_cls(lowerCAmelCase_ )
except ImportError:
raise ImportError(
f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
__lowercase = TF_MODEL_MAPPING[config.__class__](lowerCAmelCase_ )
# encoder-decoder has vocab size saved differently
__lowercase = config.vocab_size if hasattr(lowerCAmelCase_ , "vocab_size" ) else config.encoder.vocab_size
__lowercase = random_input_ids(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(lowerCAmelCase_ , decoder_input_ids=lowerCAmelCase_ , training=lowerCAmelCase_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(lowerCAmelCase_ , training=lowerCAmelCase_ )
__lowercase = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
__lowercase = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
__lowercase = (
hasattr(lowerCAmelCase_ , "architectures" )
and isinstance(config.architectures , lowerCAmelCase_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
__lowercase = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
__lowercase = __import__("transformers" , fromlist=[model_class] )
__lowercase = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
__lowercase = model_cls(lowerCAmelCase_ )
except ImportError:
raise ImportError(
f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
__lowercase = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](lowerCAmelCase_ )
# encoder-decoder has vocab size saved differently
__lowercase = config.vocab_size if hasattr(lowerCAmelCase_ , "vocab_size" ) else config.encoder.vocab_size
__lowercase = random_input_ids(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
__lowercase = model(lowerCAmelCase_ , decoder_input_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ )[0]
__lowercase = tf.gradients(lowerCAmelCase_ , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
__lowercase = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ )[0]
__lowercase = tf.gradients(lowerCAmelCase_ , model.trainable_variables )
return gradients
__lowercase = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def snake_case__ ( self , lowerCAmelCase_ ):
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run an additional 5 times to stabilize compilation for tpu
logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
timeit.repeat(lowerCAmelCase_ , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
__lowercase = timeit.repeat(
lowerCAmelCase_ , repeat=self.args.repeat , number=10 , )
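# taking the best of `args.repeat` timings and dividing by number=10 turns
# the total into a per-call latency, since the minimum is less noisy than
# the mean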
return min(lowerCAmelCase_ ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f'''Doesn\'t fit on GPU. {e}''' )
def snake_case__ ( self , lowerCAmelCase_ ):
logger.info(
"Note that TensorFlow allocates more memory than "
"it might need to speed up computation. "
"The memory reported here corresponds to the memory "
"reported by `nvidia-smi`, which can vary depending "
"on total available memory on the GPU that is used." )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
" consumption line by line." )
__lowercase = start_memory_tracing("transformers" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
" with `args.memory=False`" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"py3nvml not installed, we won't log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to log information about GPU." )
__lowercase = "N/A"
else:
logger.info(
"Measuring total GPU usage on GPU device. Make sure to not have additional processes"
" running on the same GPU." )
# init nvml
nvml.nvmlInit()
func()
__lowercase = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
__lowercase = nvml.nvmlDeviceGetMemoryInfo(lowerCAmelCase_ )
__lowercase = meminfo.used
__lowercase = Memory(lowerCAmelCase_ )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
" TensorFlow." )
__lowercase = None
else:
__lowercase = measure_peak_memory_cpu(lowerCAmelCase_ )
__lowercase = Memory(lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else memory_bytes
if self.args.trace_memory_line_by_line:
__lowercase = stop_memory_tracing(lowerCAmelCase_ )
if memory is None:
__lowercase = summary.total
else:
__lowercase = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f'''Doesn\'t fit on GPU. {e}''' )
return "N/A", None
| 703
|
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCAmelCase__ = logging.getLogger(__name__)
lowerCAmelCase__ = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
lowerCAmelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class snake_case :
"""simple docstring"""
__lowerCAmelCase = field(
default=__snake_case ,metadata={
"""help""": (
"""The model checkpoint for weights initialization. Leave None if you want to train a model from"""
""" scratch."""
)
} ,)
__lowerCAmelCase = field(
default=__snake_case ,metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(__snake_case )} ,)
__lowerCAmelCase = field(
default=__snake_case ,metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__lowerCAmelCase = field(
default=__snake_case ,metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__lowerCAmelCase = field(
default=__snake_case ,metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} ,)
@dataclass
class snake_case :
"""simple docstring"""
__lowerCAmelCase = field(
default=__snake_case ,metadata={"""help""": """The input training data file (a text file)."""} )
__lowerCAmelCase = field(
default=__snake_case ,metadata={
"""help""": (
"""The input training data files (multiple files in glob format). """
"""Very often splitting large files to smaller files can prevent tokenizer going out of memory"""
)
} ,)
__lowerCAmelCase = field(
default=__snake_case ,metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} ,)
__lowerCAmelCase = field(
default=__snake_case ,metadata={"""help""": """An optional input train ref data file for whole word mask in Chinese."""} ,)
__lowerCAmelCase = field(
default=__snake_case ,metadata={"""help""": """An optional input eval ref data file for whole word mask in Chinese."""} ,)
__lowerCAmelCase = field(
default=__snake_case ,metadata={"""help""": """Whether distinct lines of text in the dataset are to be handled as distinct sequences."""} ,)
__lowerCAmelCase = field(
default=__snake_case ,metadata={"""help""": """Train with masked-language modeling loss instead of language modeling."""} )
__lowerCAmelCase = field(default=__snake_case ,metadata={"""help""": """Whether ot not to use whole word mask."""} )
__lowerCAmelCase = field(
default=0.15 ,metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} )
__lowerCAmelCase = field(
default=1 / 6 ,metadata={
"""help""": (
"""Ratio of length of a span of masked tokens to surrounding context length for permutation language"""
""" modeling."""
)
} ,)
__lowerCAmelCase = field(
default=5 ,metadata={"""help""": """Maximum length of a span of masked tokens for permutation language modeling."""} )
__lowerCAmelCase = field(
default=-1 ,metadata={
"""help""": (
"""Optional input sequence length after tokenization."""
"""The training dataset will be truncated in block of this size for training."""
"""Default to the model max input length for single sentence inputs (take into account special tokens)."""
)
} ,)
__lowerCAmelCase = field(
default=__snake_case ,metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def get_dataset ( args , tokenizer , evaluate = False , cache_dir = None , ) -> Optional[int]:
'''simple docstring'''
def _dataset(file_path , ref_path=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask" )
return LineByLineWithRefDataset(
tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , ref_path=ref_path , )
return LineByLineTextDataset(tokenizer=tokenizer , file_path=file_path , block_size=args.block_size )
else:
return TextDataset(
tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=cache_dir , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(f ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def main( ) -> Optional[Any]:
'''simple docstring'''
__lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
"or remove the --do_eval argument." )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , _UpperCAmelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
__lowercase = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
__lowercase = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.tokenizer_name:
__lowercase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
" script, save it,and load it from here, using --tokenizer_name" )
if model_args.model_name_or_path:
__lowercase = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , )
else:
logger.info("Training new model from scratch" )
__lowercase = AutoModelWithLMHead.from_config(_UpperCAmelCase )
model.resize_token_embeddings(len(_UpperCAmelCase ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
"--mlm flag (masked language modeling)." )
if data_args.block_size <= 0:
__lowercase = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
__lowercase = min(data_args.block_size , tokenizer.max_len )
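# Sketch of the rule (numbers assumed): block_size=1_000 with a model limit
# of 512 resolves to 512, while block_size <= 0 falls back to the model's
# max length via the branch above.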
# Get datasets
__lowercase = (
get_dataset(_UpperCAmelCase , tokenizer=_UpperCAmelCase , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
__lowercase = (
get_dataset(_UpperCAmelCase , tokenizer=_UpperCAmelCase , evaluate=_UpperCAmelCase , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
__lowercase = DataCollatorForPermutationLanguageModeling(
tokenizer=_UpperCAmelCase , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
__lowercase = DataCollatorForWholeWordMask(
tokenizer=_UpperCAmelCase , mlm_probability=data_args.mlm_probability )
else:
__lowercase = DataCollatorForLanguageModeling(
tokenizer=_UpperCAmelCase , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__lowercase = Trainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , data_collator=_UpperCAmelCase , train_dataset=_UpperCAmelCase , eval_dataset=_UpperCAmelCase , prediction_loss_only=_UpperCAmelCase , )
# Training
if training_args.do_train:
__lowercase = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=_UpperCAmelCase )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowercase = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
__lowercase = trainer.evaluate()
__lowercase = math.exp(eval_output["eval_loss"] )
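# perplexity = exp(mean eval cross-entropy); e.g. an eval_loss of 3.0
# corresponds to a perplexity of roughly 20.1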
__lowercase = {"perplexity": perplexity}
__lowercase = os.path.join(training_args.output_dir , "eval_results_lm.txt" )
if trainer.is_world_master():
with open(_UpperCAmelCase , "w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s" , _UpperCAmelCase , str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
results.update(_UpperCAmelCase )
return results
def _mp_fn( _UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 576
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
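# The _LazyModule indirection defers the heavy torch-backed imports until an
# attribute is first accessed, keeping a plain `import` of this module cheap.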
| 88
|
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
_lowerCamelCase : int = re.compile(r'''\s+''')
def a_ ( __lowercase : List[Any] ) -> int:
return {"hash": hashlib.mda(re.sub(__lowercase , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}
def a_ ( __lowercase : List[Any] ) -> Dict:
_snake_case = [len(__lowercase ) for line in example['content'].splitlines()]
return {"line_mean": np.mean(__lowercase ), "line_max": max(__lowercase )}
def a_ ( __lowercase : Optional[int] ) -> List[str]:
_snake_case = np.mean([c.isalnum() for c in example['content']] )
return {"alpha_frac": alpha_frac}
def a_ ( __lowercase : List[Any] , __lowercase : Optional[Any] ) -> Optional[int]:
if example["hash"] in uniques:
uniques.remove(example['hash'] )
return True
else:
return False
def a_ ( __lowercase : Union[str, Any] , __lowercase : int=5 ) -> Optional[Any]:
_snake_case = ['auto-generated', 'autogenerated', 'automatically generated']
_snake_case = example['content'].splitlines()
for _, line in zip(range(__lowercase ) , __lowercase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def a_ ( __lowercase : List[Any] , __lowercase : int=5 , __lowercase : Tuple=0.0_5 ) -> Union[str, Any]:
_snake_case = ['unit tests', 'test file', 'configuration file']
_snake_case = example['content'].splitlines()
_snake_case = 0
_snake_case = 0
# first test
for _, line in zip(range(__lowercase ) , __lowercase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_snake_case = example['content'].count('\n' )
_snake_case = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('config' )
count_test += line.lower().count('test' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def a_ ( __lowercase : Union[str, Any] ) -> Any:
_snake_case = ['def ', 'class ', 'for ', 'while ']
_snake_case = example['content'].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def a_ ( __lowercase : Tuple , __lowercase : Any=4 ) -> List[str]:
_snake_case = example['content'].splitlines()
_snake_case = 0
for line in lines:
counter += line.lower().count('=' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def a_ ( __lowercase : Dict ) -> Dict:
_snake_case = tokenizer(example['content'] , truncation=__lowercase )['input_ids']
_snake_case = len(example['content'] ) / len(__lowercase )
return {"ratio": ratio}
def a_ ( __lowercase : Optional[Any] ) -> Any:
_snake_case = {}
results.update(get_hash(__lowercase ) )
results.update(line_stats(__lowercase ) )
results.update(alpha_stats(__lowercase ) )
results.update(char_token_ratio(__lowercase ) )
results.update(is_autogenerated(__lowercase ) )
results.update(is_config_or_test(__lowercase ) )
results.update(has_no_keywords(__lowercase ) )
results.update(has_few_assignments(__lowercase ) )
return results
def a_ ( __lowercase : Optional[int] , __lowercase : str , __lowercase : List[Any] ) -> int:
if not check_uniques(__lowercase , __lowercase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def a_ ( __lowercase : Dict ) -> Dict:
with open(__lowercase , 'rb' ) as f_in:
with gzip.open(str(__lowercase ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
shutil.copyfileobj(__lowercase , __lowercase )
os.unlink(__lowercase )
# Settings
_lowerCamelCase : Dict = HfArgumentParser(PreprocessingArguments)
_lowerCamelCase : Dict = parser.parse_args()
if args.num_workers is None:
_lowerCamelCase : int = multiprocessing.cpu_count()
_lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
_lowerCamelCase : Any = time.time()
_lowerCamelCase : Optional[Any] = load_dataset(args.dataset_name, split='''train''')
print(F'Time to load dataset: {time.time()-t_start:.2f}')
# Run preprocessing
_lowerCamelCase : Optional[int] = time.time()
_lowerCamelCase : Union[str, Any] = ds.map(preprocess, num_proc=args.num_workers)
print(F'Time to preprocess dataset: {time.time()-t_start:.2f}')
# Deduplicate hashes
_lowerCamelCase : List[Any] = set(ds.unique('''hash'''))
_lowerCamelCase : Dict = len(uniques) / len(ds)
print(F'Fraction of duplicates: {1-frac:.2%}')
# Deduplicate data and apply heuristics
_lowerCamelCase : List[Any] = time.time()
_lowerCamelCase : Optional[int] = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(F'Time to filter dataset: {time.time()-t_start:.2f}')
print(F'Size of filtered dataset: {len(ds_filter)}')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
_lowerCamelCase : Union[str, Any] = time.time()
_lowerCamelCase , _lowerCamelCase : Dict = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F'Time to deduplicate dataset: {time.time()-t_start:.2f}')
print(F'Size of deduplicate dataset: {len(ds_filter)}')
# Save data in batches of samples_per_file
_lowerCamelCase : Optional[Any] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
_lowerCamelCase : int = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
_lowerCamelCase : Union[str, Any] = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
_lowerCamelCase : Dict = str(data_dir / F'file-{file_number+1:012}.json')
_lowerCamelCase : str = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F'Time to save dataset: {time.time()-t_start:.2f}')
| 686
| 0
|
def sum_of_digits( _UpperCamelCase) -> int:
"""simple docstring"""
n = abs(_UpperCamelCase)
res = 0
while n > 0:
res += n % 10
n //= 10
return res
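# Worked example: sum_of_digits(-123) strips the sign and returns 1 + 2 + 3 == 6.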
def sum_of_digits_recursion( _UpperCamelCase) -> int:
"""simple docstring"""
n = abs(_UpperCamelCase)
return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)
def sum_of_digits_compact( _UpperCamelCase) -> int:
"""simple docstring"""
return sum(int(c) for c in str(abs(_UpperCamelCase)))
def benchmark( ) -> None:
"""simple docstring"""
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(func , value) -> None:
call = F'{func.__name__}({value})'
timing = timeit(F'__main__.{call}' , setup='import __main__')
print(F'{call:56} = {func(value)} -- {timing:.4f} seconds')
for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(func , value)
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 702
|
from __future__ import annotations
solution = []
def is_safe( board , row , column) -> bool:
"""simple docstring"""
for i in range(len(board)):
if board[row][i] == 1:
return False
for i in range(len(board)):
if board[i][column] == 1:
return False
for i, j in zip(range(row , -1 , -1) , range(column , -1 , -1)):
if board[i][j] == 1:
return False
for i, j in zip(range(row , -1 , -1) , range(column , len(board))):
if board[i][j] == 1:
return False
return True
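# A square is safe when its row, its column and both upper diagonals hold no
# queen; the lower diagonals need no check because rows below the current one
# are still empty when this is called.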
def solve( board , row) -> bool:
"""simple docstring"""
if row >= len(board):
solution.append(board)
printboard(board)
print()
return True
for i in range(len(board)):
if is_safe(board , row , i):
board[row][i] = 1
solve(board , row + 1)
board[row][i] = 0
return False
def printboard( board) -> None:
"""simple docstring"""
for i in range(len(board)):
for j in range(len(board)):
if board[i][j] == 1:
print('Q' , end=' ')
else:
print('.' , end=' ')
print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
| 410
| 0
|
'''simple docstring'''
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
def _snake_case ( self ) -> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
__SCREAMING_SNAKE_CASE : List[Any] = FlaxDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=lowercase , cache_dir=lowercase )
__SCREAMING_SNAKE_CASE : Optional[Any] = [t[-1] for t in os.walk(os.path.join(lowercase , os.listdir(lowercase )[0] , '''snapshots''' ) )]
__SCREAMING_SNAKE_CASE : Tuple = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('''.bin''' ) for f in files )
@slow
@require_flax
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
def _snake_case ( self ) -> Tuple:
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=lowercase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__SCREAMING_SNAKE_CASE : List[Any] = jax.random.PRNGKey(0 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = 4
__SCREAMING_SNAKE_CASE : int = jax.device_count()
__SCREAMING_SNAKE_CASE : int = num_samples * [prompt]
__SCREAMING_SNAKE_CASE : Optional[Any] = pipeline.prepare_inputs(lowercase )
# shard inputs and rng
__SCREAMING_SNAKE_CASE : str = replicate(lowercase )
__SCREAMING_SNAKE_CASE : List[Any] = jax.random.split(lowercase , lowercase )
__SCREAMING_SNAKE_CASE : List[str] = shard(lowercase )
__SCREAMING_SNAKE_CASE : Optional[Any] = pipeline(lowercase , lowercase , lowercase , lowercase , jit=lowercase ).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_5_1_4_7_4_5 ) < 1e-3
assert np.abs(np.abs(lowercase , dtype=np.floataa ).sum() - 4_9_9_4_7.8_7_5 ) < 5e-1
__SCREAMING_SNAKE_CASE : str = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowercase ) == num_samples
def _snake_case ( self ) -> Tuple:
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''flax''' , safety_checker=lowercase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__SCREAMING_SNAKE_CASE : Tuple = jax.random.PRNGKey(0 )
__SCREAMING_SNAKE_CASE : str = 5_0
__SCREAMING_SNAKE_CASE : Dict = jax.device_count()
__SCREAMING_SNAKE_CASE : Dict = num_samples * [prompt]
__SCREAMING_SNAKE_CASE : Dict = pipeline.prepare_inputs(lowercase )
# shard inputs and rng
__SCREAMING_SNAKE_CASE : List[Any] = replicate(lowercase )
__SCREAMING_SNAKE_CASE : List[str] = jax.random.split(lowercase , lowercase )
__SCREAMING_SNAKE_CASE : Optional[int] = shard(lowercase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline(lowercase , lowercase , lowercase , lowercase , jit=lowercase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_5_6_5_2_4_0_1) ) < 1e-3
assert np.abs((np.abs(lowercase , dtype=np.floataa ).sum() - 2_3_8_3_8_0_8.2) ) < 5e-1
def _snake_case ( self ) -> Dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=lowercase )
__SCREAMING_SNAKE_CASE : List[Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = jax.random.PRNGKey(0 )
__SCREAMING_SNAKE_CASE : Dict = 5_0
__SCREAMING_SNAKE_CASE : Tuple = jax.device_count()
__SCREAMING_SNAKE_CASE : str = num_samples * [prompt]
__SCREAMING_SNAKE_CASE : List[Any] = pipeline.prepare_inputs(lowercase )
# shard inputs and rng
__SCREAMING_SNAKE_CASE : str = replicate(lowercase )
__SCREAMING_SNAKE_CASE : Optional[Any] = jax.random.split(lowercase , lowercase )
__SCREAMING_SNAKE_CASE : str = shard(lowercase )
__SCREAMING_SNAKE_CASE : int = pipeline(lowercase , lowercase , lowercase , lowercase , jit=lowercase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
assert np.abs((np.abs(lowercase , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def _snake_case ( self ) -> Any:
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa )
__SCREAMING_SNAKE_CASE : Optional[Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__SCREAMING_SNAKE_CASE : Any = jax.random.PRNGKey(0 )
__SCREAMING_SNAKE_CASE : Tuple = 5_0
__SCREAMING_SNAKE_CASE : List[Any] = jax.device_count()
__SCREAMING_SNAKE_CASE : int = num_samples * [prompt]
__SCREAMING_SNAKE_CASE : int = pipeline.prepare_inputs(lowercase )
# shard inputs and rng
__SCREAMING_SNAKE_CASE : Tuple = replicate(lowercase )
__SCREAMING_SNAKE_CASE : Any = jax.random.split(lowercase , lowercase )
__SCREAMING_SNAKE_CASE : List[str] = shard(lowercase )
__SCREAMING_SNAKE_CASE : List[Any] = pipeline(lowercase , lowercase , lowercase , lowercase , jit=lowercase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
assert np.abs((np.abs(lowercase , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def _snake_case ( self ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Optional[Any] = FlaxDDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , set_alpha_to_one=lowercase , steps_offset=1 , )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , scheduler=lowercase , safety_checker=lowercase , )
__SCREAMING_SNAKE_CASE : str = scheduler.create_state()
__SCREAMING_SNAKE_CASE : Tuple = scheduler_state
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__SCREAMING_SNAKE_CASE : List[str] = jax.random.PRNGKey(0 )
__SCREAMING_SNAKE_CASE : List[str] = 5_0
__SCREAMING_SNAKE_CASE : str = jax.device_count()
__SCREAMING_SNAKE_CASE : List[str] = num_samples * [prompt]
__SCREAMING_SNAKE_CASE : str = pipeline.prepare_inputs(lowercase )
# shard inputs and rng
__SCREAMING_SNAKE_CASE : int = replicate(lowercase )
__SCREAMING_SNAKE_CASE : List[Any] = jax.random.split(lowercase , lowercase )
__SCREAMING_SNAKE_CASE : Any = shard(lowercase )
__SCREAMING_SNAKE_CASE : str = pipeline(lowercase , lowercase , lowercase , lowercase , jit=lowercase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_5_0_4_3_9_4_5) ) < 1e-3
assert np.abs((np.abs(lowercase , dtype=np.floataa ).sum() - 2_3_4_7_6_9_3.5) ) < 5e-1
def _snake_case ( self ) -> Tuple:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : int = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__SCREAMING_SNAKE_CASE : List[Any] = jax.device_count()
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_samples * [prompt]
__SCREAMING_SNAKE_CASE : str = jax.random.split(jax.random.PRNGKey(0 ) , lowercase )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=lowercase , )
__SCREAMING_SNAKE_CASE : Optional[Any] = replicate(lowercase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline.prepare_inputs(lowercase )
__SCREAMING_SNAKE_CASE : Optional[Any] = shard(lowercase )
__SCREAMING_SNAKE_CASE : Tuple = pipeline(lowercase , lowercase , lowercase , jit=lowercase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
__SCREAMING_SNAKE_CASE : Optional[Any] = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=lowercase , use_memory_efficient_attention=lowercase , )
__SCREAMING_SNAKE_CASE : int = replicate(lowercase )
__SCREAMING_SNAKE_CASE : str = pipeline.prepare_inputs(lowercase )
__SCREAMING_SNAKE_CASE : Tuple = shard(lowercase )
__SCREAMING_SNAKE_CASE : List[Any] = pipeline(lowercase , lowercase , lowercase , jit=lowercase ).images
assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
        __SCREAMING_SNAKE_CASE : Tuple = images_eff[2, 0, 2_5_6, 1_0:1_7, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
| 158
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_A = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
_A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 158
| 1
|
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b
def get_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
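# Usage sketch (the `accelerator` instance below is a placeholder; `Accelerator` comes from `accelerate`):
#   accelerator = Accelerator()
#   train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size=16)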
| 362
|
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class FlaxBlenderbotModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxBlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99
    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)
    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
@slow
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
__lowercase = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
__lowercase = np.ones((1, 1) ) * model.config.eos_token_id
__lowercase = model(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
@slow
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
"""simple docstring"""
__lowercase = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25}
__lowercase = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
__lowercase = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=lowerCamelCase__ )
__lowercase = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )
__lowercase = ['''Sam''']
__lowercase = tokenizer(lowerCamelCase__ , return_tensors='''jax''' )
__lowercase = model.generate(**lowerCamelCase__ , **lowerCamelCase__ )
__lowercase = '''Sam is a great name. It means "sun" in Gaelic.'''
__lowercase = tokenizer.batch_decode(lowerCamelCase__ , **lowerCamelCase__ )
assert generated_txt[0].strip() == tgt_text
| 362
| 1
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1_000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
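# Example (sketch): get_mobilevit_config("deeplabv3_mobilevit_s") yields a 21-label segmentation
# config with image_size=512, while plain "mobilevit_s" yields a 1000-label classification config.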
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
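# Illustration (computed by hand from the rules above):
#   rename_key("conv_1.block.norm.weight") -> "mobilevit.conv_stem.normalization.weight"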
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
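# prepare_img() fetches the standard COCO val2017 image (two cats on a couch) that
# Hugging Face conversion scripts commonly use for sanity-checking model outputs.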
@torch.no_grad()
def _lowerCamelCase ( __a, __a, __a, __a=False ):
SCREAMING_SNAKE_CASE_ = get_mobilevit_config(__a )
# load original state_dict
SCREAMING_SNAKE_CASE_ = torch.load(__a, map_location='''cpu''' )
# load 🤗 model
if mobilevit_name.startswith('''deeplabv3_''' ):
SCREAMING_SNAKE_CASE_ = MobileViTForSemanticSegmentation(__a ).eval()
else:
SCREAMING_SNAKE_CASE_ = MobileViTForImageClassification(__a ).eval()
SCREAMING_SNAKE_CASE_ = convert_state_dict(__a, __a )
model.load_state_dict(__a )
# Check outputs on an image, prepared by MobileViTImageProcessor
SCREAMING_SNAKE_CASE_ = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32 )
SCREAMING_SNAKE_CASE_ = image_processor(images=prepare_img(), return_tensors='''pt''' )
SCREAMING_SNAKE_CASE_ = model(**__a )
SCREAMING_SNAKE_CASE_ = outputs.logits
if mobilevit_name.startswith('''deeplabv3_''' ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
SCREAMING_SNAKE_CASE_ = torch.tensor(
[
[[6.2_0_6_5, 6.1_2_9_2, 6.2_0_7_0], [6.1_0_7_9, 6.1_2_5_4, 6.1_7_4_7], [6.0_0_4_2, 6.1_0_7_1, 6.1_0_3_4]],
[[-6.9_2_5_3, -6.8_6_5_3, -7.0_3_9_8], [-7.3_2_1_8, -7.3_9_8_3, -7.3_6_7_0], [-7.1_9_6_1, -7.2_4_8_2, -7.1_5_6_9]],
[[-4.4_7_2_3, -4.4_3_4_8, -4.3_7_6_9], [-5.3_6_2_9, -5.4_6_3_2, -5.4_5_9_8], [-5.1_5_8_7, -5.3_4_0_2, -5.5_0_5_9]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
SCREAMING_SNAKE_CASE_ = torch.tensor(
[
[[5.4_4_4_9, 5.5_7_3_3, 5.6_3_1_4], [5.1_8_1_5, 5.3_9_3_0, 5.5_9_6_3], [5.1_6_5_6, 5.4_3_3_3, 5.4_8_5_3]],
[[-9.4_4_2_3, -9.7_7_6_6, -9.6_7_1_4], [-9.1_5_8_1, -9.5_7_2_0, -9.5_5_1_9], [-9.1_0_0_6, -9.6_4_5_8, -9.5_7_0_3]],
[[-7.7_7_2_1, -7.3_7_1_6, -7.1_5_8_3], [-8.4_5_9_9, -8.0_6_2_4, -7.7_9_4_4], [-8.4_1_7_2, -7.8_3_6_6, -7.5_0_2_5]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
SCREAMING_SNAKE_CASE_ = torch.tensor(
[
[[6.9_8_1_1, 6.9_7_4_3, 7.3_1_2_3], [7.1_7_7_7, 7.1_9_3_1, 7.3_9_3_8], [7.5_6_3_3, 7.8_0_5_0, 7.8_9_0_1]],
[[-1_0.5_5_3_6, -1_0.2_3_3_2, -1_0.2_9_2_4], [-1_0.2_3_3_6, -9.8_6_2_4, -9.5_9_6_4], [-1_0.8_8_4_0, -1_0.8_1_5_8, -1_0.6_6_5_9]],
[[-3.4_9_3_8, -3.0_6_3_1, -2.8_6_2_0], [-3.4_2_0_5, -2.8_1_3_5, -2.6_8_7_5], [-3.4_1_7_9, -2.7_9_4_5, -2.8_7_5_0]],
] )
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}' )
assert torch.allclose(logits[0, :3, :3, :3], __a, atol=1E-4 )
else:
assert logits.shape == (1, 1_000)
if mobilevit_name == "mobilevit_s":
SCREAMING_SNAKE_CASE_ = torch.tensor([-0.9_8_6_6, 0.2_3_9_2, -1.1_2_4_1] )
elif mobilevit_name == "mobilevit_xs":
SCREAMING_SNAKE_CASE_ = torch.tensor([-2.4_7_6_1, -0.9_3_9_9, -1.9_5_8_7] )
elif mobilevit_name == "mobilevit_xxs":
SCREAMING_SNAKE_CASE_ = torch.tensor([-1.9_3_6_4, -1.2_3_2_7, -0.4_6_5_3] )
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}' )
assert torch.allclose(logits[0, :3], __a, atol=1E-4 )
Path(__a ).mkdir(exist_ok=__a )
print(F'Saving model {mobilevit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__a )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(__a )
if push_to_hub:
SCREAMING_SNAKE_CASE_ = {
'''mobilevit_s''': '''mobilevit-small''',
'''mobilevit_xs''': '''mobilevit-x-small''',
'''mobilevit_xxs''': '''mobilevit-xx-small''',
'''deeplabv3_mobilevit_s''': '''deeplabv3-mobilevit-small''',
'''deeplabv3_mobilevit_xs''': '''deeplabv3-mobilevit-x-small''',
'''deeplabv3_mobilevit_xxs''': '''deeplabv3-mobilevit-xx-small''',
}
print('''Pushing to the hub...''' )
SCREAMING_SNAKE_CASE_ = model_mapping[mobilevit_name]
image_processor.push_to_hub(__a, organization='''apple''' )
model.push_to_hub(__a, organization='''apple''' )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--mobilevit_name',
default='mobilevit_s',
type=str,
help=(
'Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','
' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'
),
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
lowerCAmelCase__ = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 626
|
"""simple docstring"""
import datasets
from .evaluate import evaluate
_CITATION = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
_KWARGS_DESCRIPTION = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n    predictions: List of question-answers dictionaries with the following key-values:\n        - \'id\': id of the question-answer pair as given in the references (see below)\n        - \'prediction_text\': the text of the answer\n    references: List of question-answers dictionaries with the following key-values:\n        - \'id\': id of the question-answer pair (see above),\n        - \'answers\': a Dict in the SQuAD dataset format\n            {\n                \'text\': list of possible texts for the answer, as a list of strings\n                \'answer_start\': list of start positions for the answer, as a list of ints\n            }\n            Note that answer_start values are not taken into account to compute the metric.\nReturns:\n    \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n    \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n    >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n    >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n    >>> squad_metric = datasets.load_metric("squad")\n    >>> results = squad_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {'''id''': datasets.Value('''string''' ), '''prediction_text''': datasets.Value('''string''' )},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ) , codebase_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , reference_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 626
| 1
|
'''simple docstring'''
def binomial_coefficient(n: int, r: int) -> int:
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
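# Expected output: 252, since C(10, 5) = 10! / (5! * 5!) = 252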
| 710
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
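# Usage sketch (the `unet` below is a placeholder for a trained UNet2DModel):
#   pipe = DDIMPipeline(unet=unet, scheduler=DDIMScheduler())
#   image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]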
| 88
| 0
|
"""simple docstring"""
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
"""simple docstring"""
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if resistance < 0:
raise ValueError('Resistance cannot be negative' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('Exactly one argument must be 0' )
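# Worked examples (each call solves for the argument passed as 0):
#   ohms_law(voltage=10, current=0, resistance=5) -> {"current": 2.0}
#   ohms_law(voltage=0, current=2, resistance=3)  -> {"voltage": 6.0}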
if __name__ == "__main__":
import doctest
doctest.testmod()
| 373
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"openai/imagegpt-small": "",
"openai/imagegpt-medium": "",
"openai/imagegpt-large": "",
}
class ImageGPTConfig(PretrainedConfig):
_lowerCAmelCase = "imagegpt"
_lowerCAmelCase = ["past_key_values"]
_lowerCAmelCase = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
    def __init__(
        self,
        vocab_size=512 + 1,
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework=None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ):
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
| 581
| 0
|
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
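# Illustration: rename_keys("transformer.layers.0.linear1.weight") -> "model.decoder.layers.0.fc1.weight"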
def rename_state_dict(state_dict: "OrderedDict", hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj into separate query/key/value projections
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
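# e.g. decoder_config_from_checkpoint("small") -> MusicgenDecoderConfig(hidden_size=1024,
# ffn_dim=4096, num_hidden_layers=24, num_attention_heads=16)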
@torch.no_grad()
def __lowerCamelCase ( A__ : str , A__ : Tuple=None , A__ : str=None , A__ : Optional[Any]="cpu" ) -> Union[str, Any]:
lowerCamelCase_ : Any = MusicGen.get_pretrained(__UpperCamelCase , device=__UpperCamelCase )
lowerCamelCase_ : List[Any] = decoder_config_from_checkpoint(__UpperCamelCase )
lowerCamelCase_ : Optional[Any] = fairseq_model.lm.state_dict()
lowerCamelCase_ : int = rename_state_dict(
__UpperCamelCase , hidden_size=decoder_config.hidden_size )
lowerCamelCase_ : Union[str, Any] = TaEncoderModel.from_pretrained("""t5-base""" )
lowerCamelCase_ : List[str] = EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
lowerCamelCase_ : Dict = MusicgenForCausalLM(__UpperCamelCase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
lowerCamelCase_ : List[Any] = decoder.load_state_dict(__UpperCamelCase , strict=__UpperCamelCase )
for key in missing_keys.copy():
if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(__UpperCamelCase )
if len(__UpperCamelCase ) > 0:
raise ValueError(f'''Missing key(s) in state_dict: {missing_keys}''' )
if len(__UpperCamelCase ) > 0:
raise ValueError(f'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
lowerCamelCase_ : Optional[int] = MusicgenForConditionalGeneration(text_encoder=__UpperCamelCase , audio_encoder=__UpperCamelCase , decoder=__UpperCamelCase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(__UpperCamelCase )
# check we can do a forward pass
lowerCamelCase_ : Any = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
lowerCamelCase_ : Any = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
lowerCamelCase_ : Any = model(input_ids=__UpperCamelCase , decoder_input_ids=__UpperCamelCase ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("""Incorrect shape for logits""" )
# now construct the processor
lowerCamelCase_ : List[Any] = AutoTokenizer.from_pretrained("""t5-base""" )
lowerCamelCase_ : Dict = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" )
lowerCamelCase_ : Dict = MusicgenProcessor(feature_extractor=__UpperCamelCase , tokenizer=__UpperCamelCase )
# set the appropriate bos/pad token ids
lowerCamelCase_ : Union[str, Any] = 2048
lowerCamelCase_ : Optional[int] = 2048
# set other default generation config params
lowerCamelCase_ : Dict = int(30 * audio_encoder.config.frame_rate )
lowerCamelCase_ : Optional[Any] = True
lowerCamelCase_ : Any = 3.0
if pytorch_dump_folder is not None:
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
logger.info(f'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(__UpperCamelCase )
processor.save_pretrained(__UpperCamelCase )
if repo_id:
logger.info(f'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(__UpperCamelCase )
processor.push_to_hub(__UpperCamelCase )
if __name__ == "__main__":
snake_case__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint',
default='small',
type=str,
help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.',
)
parser.add_argument(
'--pytorch_dump_folder',
required=True,
default=None,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
parser.add_argument(
'--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.'
)
snake_case__ : List[str] = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 721
|
class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")

    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
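# Quick usage example:
#   t = TrieNode(); t.insert("cat")
#   t.find("cat") -> True, t.find("ca") -> False (prefixes are not whole words)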
| 171
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase : str = {
"""configuration_blenderbot_small""": [
"""BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotSmallConfig""",
"""BlenderbotSmallOnnxConfig""",
],
"""tokenization_blenderbot_small""": ["""BlenderbotSmallTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Any = ["""BlenderbotSmallTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : str = [
"""BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotSmallForCausalLM""",
"""BlenderbotSmallForConditionalGeneration""",
"""BlenderbotSmallModel""",
"""BlenderbotSmallPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : str = [
"""TFBlenderbotSmallForConditionalGeneration""",
"""TFBlenderbotSmallModel""",
"""TFBlenderbotSmallPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Dict = [
"""FlaxBlenderbotSmallForConditionalGeneration""",
"""FlaxBlenderbotSmallModel""",
"""FlaxBlenderbotSmallPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
lowercase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 336
|
def greatest_common_divisor(a: int, b: int) -> int:
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main() -> None:
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")
if __name__ == "__main__":
main()
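# Both variants agree on a few quick sanity checks (my own examples, not part
# of the original):
assert greatest_common_divisor(121, 11) == 11   # (121, 11) -> (11, 0) -> 11
assert gcd_by_iterative(24, 40) == 8            # 24,40 -> 40,24 -> 24,16 -> 16,8 -> 8,0
assert gcd_by_iterative(-3, 9) == 3             # abs() keeps the result non-negative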
| 336
| 1
|
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor (as nested Python lists)."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1,
                 padding_value=0.0, sampling_rate=16000, return_attention_mask=True, do_normalize=True):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
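# Outside the test harness, the extractor is used the same way. A minimal
# sketch (the checkpoint name is the usual AST one on the Hub, but treat it as
# an assumption; any 16 kHz mono waveform works as input):
import numpy as np
from transformers import ASTFeatureExtractor

feature_extractor = ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
waveform = np.random.randn(16000).astype(np.float32)  # 1 second of fake 16 kHz audio
inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")
print(inputs.input_values.shape)  # torch.Size([1, 1024, 128]): 1024 time frames x 128 mel bins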
| 720
|
"""simple docstring"""
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """Compute (a ** n) % mod in O(log n) multiplications."""
    if n == 0:
        return 1

    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod

    else:
        b = binary_exponentiation(a, n // 2, mod)  # integer division keeps n an int
        return (b * b) % mod
# a prime number
p = 701

a = 1_000_000_000
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
# using built-in exponentiation, O(p):
print((a / b) % p == (a * b ** (p - 2)) % p)
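# Both prints rely on Fermat's little theorem: for a prime p and b not
# divisible by p, b**(p-2) is the modular inverse of b, so the division
# a/b modulo p becomes the multiplication a * b**(p-2) % p. A small worked
# check of my own:
#   inverse of 3 mod 7 is 3**5 % 7 == 5, since 3 * 5 == 15 == 1 (mod 7)
assert binary_exponentiation(3, 5, 7) == 5
#   "6 divided by 3" mod 7: 6 * 5 % 7 == 2, and indeed 2 * 3 == 6 (mod 7)
assert (6 * binary_exponentiation(3, 7 - 2, 7)) % 7 == 2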
| 558
| 0
|
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('''0.9.0'''):
raise Exception('''requires fairseq >= 0.9.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''),
('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''),
('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''),
('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def load_xsum_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum'''
)
    args = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
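# The converter can also be driven programmatically. A sketch (the checkpoint
# name comes from the FAIRSEQ_MODELS list above; the output directory and HF
# config name are illustrative assumptions):
#
#   convert_bart_checkpoint(
#       "bart.large.mnli", "./bart-large-mnli", hf_checkpoint_name="facebook/bart-large-mnli"
#   )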
| 148
|
'''simple docstring'''
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
                 num_attention_heads=4, intermediate_multiple_size=4, hidden_act="gelu", hidden_dropout=0.0,
                 attention_dropout=0.1, weight_tying=True, max_position_embeddings=512, type_vocab_size=16,
                 type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()
        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying, max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]

        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
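# To actually execute these tests, the transformers test-runner conventions
# apply; the @slow generation test above is skipped unless the RUN_SLOW flag
# is set (a sketch; the module path is assumed from the usual repo layout):
#
#   python -m pytest tests/models/gpt_neox_japanese/test_modeling_gpt_neox_japanese.py
#   RUN_SLOW=1 python -m pytest tests/models/gpt_neox_japanese/test_modeling_gpt_neox_japanese.py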
| 597
| 0
|
"""simple docstring"""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input, drop_prob=0.0, training=False):
    """Drop paths (Stochastic Depth) per sample, applied in the main path of residual blocks."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
class PoolFormerDropPath(nn.Module):
    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """
    Construct Patch Embeddings.
    """

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """Group Normalization with 1 group."""

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)
class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        # Token mixing by average pooling; the identity is subtracted so the
        # residual connection in PoolFormerLayer adds it back exactly once.
        return self.pool(hidden_states) - hidden_states
class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))
        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
POOLFORMER_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

POOLFORMER_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )
class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])

        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]

        # Global average pooling over the spatial dimensions, then classify
        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
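# End to end, classification is a two-step preprocess-and-forward. A minimal
# sketch (checkpoint from the archive list above; AutoImageProcessor usage as
# suggested by the inputs docstring; the local image path is illustrative):
import torch
from PIL import Image
from transformers import AutoImageProcessor, PoolFormerForImageClassification

processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")

image = Image.open("cat.jpg")  # any RGB image
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])  # e.g. "tabby, tabby cat"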
| 706
|
"""simple docstring"""
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list = None) -> list:
    """Strand sort: repeatedly pull an ascending (or descending) strand out of
    `arr` and merge it into `solution`."""
    _operator = operator.lt if reverse else operator.gt

    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
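    # A worked trace of my own (not from the original): each call peels one
    # ascending strand off the input and merges it into the solution.
    data = [4, 3, 5, 1, 2]
    # pass 1: strand [4, 5]   -> solution [4, 5],          remaining [3, 1, 2]
    # pass 2: strand [3]      -> solution [3, 4, 5],       remaining [1, 2]
    # pass 3: strand [1, 2]   -> solution [1, 2, 3, 4, 5], remaining []
    assert strand_sort(data) == [1, 2, 3, 4, 5]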
| 507
| 0
|