The rows below come from a code-style dataset with the following (reconstructed) schema:

| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82 to 53.2k chars) | int64 (0 to 721) | string (91 to 41.9k chars) | int64 (0 to 699) | int64 (0 or 1) |
'''simple docstring'''
__version__ = '0.18.2'
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
T5FilmDecoder,
Transformer2DModel,
UNet1DModel,
UNet2DConditionModel,
UNet2DModel,
UNet3DConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPM2AncestralDiscreteScheduler,
KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImg2ImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImg2ImgPipeline,
IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImg2ImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyV22ControlnetImg2ImgPipeline,
KandinskyV22ControlnetPipeline,
KandinskyV22Img2ImgPipeline,
KandinskyV22InpaintPipeline,
KandinskyV22Pipeline,
KandinskyV22PriorEmb2EmbPipeline,
KandinskyV22PriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImg2ImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImg2ImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepth2ImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImg2ImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPix2PixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDM3DPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPix2PixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImg2ImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
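Everything above follows a single guard pattern: probe for an optional backend, and import dummy placeholders when it is missing so that attribute access fails late with a clear error. A minimal, self-contained sketch of the idea (the probe and placeholder here are illustrative, not the library's actual dummy objects):

class OptionalDependencyNotAvailable(BaseException):
    """Signals that an optional backend is not installed."""

def is_onnx_available() -> bool:
    try:
        import onnxruntime  # noqa: F401
    except ImportError:
        return False
    return True

try:
    if not is_onnx_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    class OnnxRuntimeModel:  # placeholder that errors on use, not on import
        def __init__(self, *args, **kwargs):
            raise ImportError("OnnxRuntimeModel requires the `onnxruntime` backend.")
else:
    pass  # the real import would run here: from .pipelines import OnnxRuntimeModel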
| 649
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_reformer'] = ['ReformerTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_reformer_fast'] = ['ReformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_reformer'] = [
'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ReformerAttention',
'ReformerForMaskedLM',
'ReformerForQuestionAnswering',
'ReformerForSequenceClassification',
'ReformerLayer',
'ReformerModel',
'ReformerModelWithLMHead',
'ReformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
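The `_LazyModule` assignment above defers the heavy imports until an attribute is first accessed. A minimal sketch of how such a lazy module can work (an illustration of the idea, not the transformers implementation):

import importlib
import types

class LazyModule(types.ModuleType):
    """Resolve exported attributes on first access instead of at import time."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }
        self.__all__ = list(self._attr_to_module)

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache for subsequent lookups
        return value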
| 649
| 1
|
'''simple docstring'''
import numpy as np
SQUARE = [
    ['a', 'b', 'c', 'd', 'e'],
    ['f', 'g', 'h', 'i', 'k'],
    ['l', 'm', 'n', 'o', 'p'],
    ['q', 'r', 's', 't', 'u'],
    ['v', 'w', 'x', 'y', 'z'],
]

class BifidCipher:
    """Bifid cipher over a 5x5 Polybius square ('j' is merged with 'i')."""

    def __init__(self):
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str):
        # Return the 1-based (row, column) coordinates of `letter` in the square.
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        # Map 1-based (row, column) coordinates back to a letter.
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(' ', '')
        message = message.replace('j', 'i')

        # Write the coordinates of each letter as two rows...
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        # ...then read the flattened sequence back off in pairs.
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ''
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(' ', '')

        # Invert the transposition: write the pairs out flat...
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        # ...then reshape back into two rows and read off the coordinates.
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ''
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
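A quick round-trip check for the cipher above ('j' maps to 'i' before encoding, so inputs avoiding 'j' survive an exact round trip):

cipher = BifidCipher()
encoded = cipher.encode('testmessage')
assert cipher.decode(encoded) == 'testmessage'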
| 715
|
'''simple docstring'''
def triangle_number_generator():
    """Yield successive triangle numbers: 1, 3, 6, 10, ..."""
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2

def count_divisors(n: int) -> int:
    """Count the divisors of n via its prime factorization."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count

def solution() -> int:
    """Project Euler 12: the first triangle number with more than 500 divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)
if __name__ == "__main__":
print(solution())
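The divisor count relies on the identity d(p1^a1 * ... * pk^ak) = (a1 + 1) * ... * (ak + 1). A small sanity check of the function above:

# 28 = 2^2 * 7, so d(28) = (2 + 1) * (1 + 1) = 6: divisors are 1, 2, 4, 7, 14, 28
assert count_divisors(28) == 6
assert count_divisors(1) == 1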
| 570
| 0
|
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
lowerCamelCase = """\
@inproceedings{popovic-2015-chrf,
title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",
month = sep,
year = \"2015\",
address = \"Lisbon, Portugal\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W15-3049\",
doi = \"10.18653/v1/W15-3049\",
pages = \"392--395\",
}
@inproceedings{popovic-2017-chrf,
title = \"chr{F}++: words helping character n-grams\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Second Conference on Machine Translation\",
month = sep,
year = \"2017\",
address = \"Copenhagen, Denmark\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W17-4770\",
doi = \"10.18653/v1/W17-4770\",
pages = \"612--618\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
lowerCamelCase = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""
lowerCamelCase = """
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
'score' (float): The chrF (chrF++) score,
'char_order' (int): The character n-gram order,
'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
'beta' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ChrF(datasets.Metric):
'''simple docstring'''
def _info(self):
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"] , reference_urls=[
"https://github.com/m-popovic/chrF",
] , )
def _compute(self, predictions, references, char_order: int = CHRF.CHAR_ORDER, word_order: int = CHRF.WORD_ORDER, beta: int = CHRF.BETA, lowercase: bool = False, whitespace: bool = False, eps_smoothing: bool = False):
references_per_prediction = len(references[0])
if any(len(refs) != references_per_prediction for refs in references):
raise ValueError("Sacrebleu requires the same number of references for each prediction")
transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
output = sb_chrf.corpus_score(predictions, transformed_references)
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 82
|
"""simple docstring"""
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    # Normality = molarity (moles / volume) times the n-factor.
    return round(float(moles / volume) * nfactor)

def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    # Ideal gas law solved for pressure: P = nRT / V, with R = 0.0821 L·atm/(mol·K).
    return round(float((moles * 0.0821 * temperature) / (volume)))

def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    # Ideal gas law solved for volume: V = nRT / P.
    return round(float((moles * 0.0821 * temperature) / (pressure)))

def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    # Ideal gas law solved for temperature: T = PV / nR.
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
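A short usage sketch for the converters above; the gas-law helpers all rearrange PV = nRT with R taken as 0.0821 L·atm/(mol·K):

# 2 mol at 300 K in a 10 L vessel: P = (2 * 0.0821 * 300) / 10 ≈ 4.93, rounded to 5 atm
print(moles_to_pressure(volume=10, moles=2, temperature=300))  # 5
# 4 mol in 2 L with n-factor 2: normality = (4 / 2) * 2 = 4 N
print(molarity_to_normality(nfactor=2, moles=4, volume=2))  # 4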
| 82
| 1
|
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)

def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use"
        " `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    # Create a copy of the model and tie the weights, then check whether it contains tied weights.
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
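A hedged sketch of how the conversion entry point above is typically driven; the toy model is illustrative, and a real call additionally needs a CUDA-enabled bitsandbytes install and a `BitsAndBytesConfig`:

import torch.nn as nn

toy = nn.Sequential(nn.Linear(16, 16), nn.ReLU(), nn.Linear(16, 4))
# With quantization_config=BitsAndBytesConfig(load_in_8bit=True), each eligible
# nn.Linear above would be swapped for bnb.nn.Linear8bitLt (modules listed in
# modules_to_not_convert, by default ["lm_head"], are left untouched):
# toy = replace_with_bnb_linear(toy, quantization_config=quantization_config)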
| 105
|
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/detr-resnet-50""": """https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json""",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
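A brief usage sketch for the config class above, showing the `attribute_map` aliases in action:

config = DetrConfig()  # defaults: timm ResNet-50 backbone, 100 queries, d_model=256
print(config.hidden_size)          # 256, alias of d_model
print(config.num_attention_heads)  # 8, alias of encoder_attention_heads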
| 105
| 1
|
import unittest
from knapsack import knapsack as k
class TestKnapsack(unittest.TestCase):
    """Test cases for the knapsack implementation."""

    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
unittest.main()
| 81
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)

def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )

@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''')
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
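For a quick smoke test of the argument plumbing above, HfArgumentParser can be driven from an explicit argv list instead of the real command line (checkpoint and paths here are illustrative; the dataclasses are the ones defined above):

from transformers import HfArgumentParser, TrainingArguments

parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses(
    args=[
        "--model_name_or_path", "bert-base-uncased",  # illustrative checkpoint
        "--task_name", "swag",                        # must be one of processors.keys()
        "--data_dir", "./data/swag",                  # illustrative path
        "--output_dir", "./swag_out",
        "--do_train",
    ]
)
print(training_args.do_train, data_args.max_seq_length)  # True 128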
| 691
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'xlm-mlm-en-2048': 'https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json',
'xlm-mlm-ende-1024': 'https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json',
'xlm-mlm-enfr-1024': 'https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json',
'xlm-mlm-enro-1024': 'https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json',
'xlm-mlm-tlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json',
'xlm-mlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json',
'xlm-clm-enfr-1024': 'https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json',
'xlm-clm-ende-1024': 'https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json',
'xlm-mlm-17-1280': 'https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json',
'xlm-mlm-100-1280': 'https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json',
}
class XLMConfig(PretrainedConfig):
    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }
    def __init__(
        self,
        vocab_size=30145,
        emb_dim=2048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2048**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        """Constructs XLMConfig."""
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 707
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['feature_extraction_deit'] = ['DeiTFeatureExtractor']
_import_structure['image_processing_deit'] = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_deit'] = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_deit'] = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 273
| 0
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)

class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
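A small check of the deprecation shim above (instantiating the class emits the FutureWarning defined in its constructor):

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    CLIPFeatureExtractor()  # triggers the deprecation warning defined above
assert any(issubclass(w.category, FutureWarning) for w in caught)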
| 586
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)

class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
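A small check of the `attribute_map` indirection defined above:

config = DistilBertConfig()
assert config.hidden_size == config.dim               # 768
assert config.num_attention_heads == config.n_heads   # 12
assert config.num_hidden_layers == config.n_layers    # 6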
| 586
| 1
|
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        max_relative_position=32,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def a__ ( self ):
__a = self.prepare_config_and_inputs()
(
__a
) = config_and_inputs
__a = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
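# Inference usage sketch, mirroring the integration test above (checkpoint name taken from the test):
#     model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
#     outputs = model(torch.tensor([[0, 1, 2, 3, 4, 5]]))
#     outputs.last_hidden_state.shape  # torch.Size([1, 6, 768])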
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""gpt2""": 1024,
"""gpt2-medium""": 1024,
"""gpt2-large""": 1024,
"""gpt2-xl""": 1024,
"""distilgpt2""": 1024,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        self.add_bos_token = kwargs.pop("add_bos_token", False)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether `cp` is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True
    return False
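# Example: _is_chinese_char(ord("中")) is True (U+4E2D lies in the main CJK block),
# while _is_chinese_char(ord("A")) is False.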
def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1
def get_chinese_word(tokens: list):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: list, chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
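# Worked example (hypothetical tokens): with chinese_word_set = {"身高"}, the call
# add_sub_symbol(["身", "高", "180"], {"身高"}) marks the tail pieces of the whole word
# in place, returning ["身", "##高", "180"] — exactly what whole-word masking needs.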
def prepare_ref(lines: list, ltp_tokenizer, bert_tokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
required=False,
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""",
required=False,
type=str,
default="""./resources/ltp""",
help="""resources for LTP tokenizer, usually a path""",
)
parser.add_argument(
"""--bert""",
required=False,
type=str,
default="""./resources/robert""",
help="""resources for Bert tokenizer""",
)
parser.add_argument(
"""--save_path""",
required=False,
type=str,
default="""./resources/ref.txt""",
help="""path to save res""",
)
    args = parser.parse_args()
main(args)
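    # Example invocation (the script file name is an assumption; the argument defaults
    # are the ones declared above):
    #     python prepare_chinese_ref.py --file_name ./resources/chinese-demo.txt \
    #         --ltp ./resources/ltp --bert ./resources/robert --save_path ./resources/ref.txt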
import argparse
import os
import re
import packaging.version
__a = """examples/"""
__a = {
"""examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""),
"""doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
REPLACE_FILES = {
"""init""": """src/diffusers/__init__.py""",
"""setup""": """setup.py""",
}
__a = """README.md"""
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version() -> packaging.version.Version:
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)


def post_release_work():
    """Do all the necessary post-release steps."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
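    # Example invocations (assuming this script is saved as utils/release.py):
    #     python utils/release.py                 # prepare a minor release
    #     python utils/release.py --patch         # prepare a patch release
    #     python utils/release.py --post_release  # move back to a .dev0 version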
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """Choose a random pivot element from the list."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the k-th smallest element of lst (1-indexed); assumes distinct elements."""
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
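# Usage sketch: with distinct elements the result is deterministic even though the
# pivot is random, e.g. kth_number([2, 1, 3, 4, 5], k=3) returns 3 (the 3rd smallest).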
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects and the `TYPE_CHECKING` objects defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            objects.extend([obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0])
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    objects.extend([obj[1:-1] for obj in imports if len(obj) > 0])
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    objects.extend([obj[1:-1] for obj in imports if len(obj) > 0])
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Check that all inits define the same objects in both halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
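    # Both checks are meant to run from the repository root, e.g. (file name assumed):
    #     python utils/check_inits.py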
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count the composites below max_number that have exactly two prime factors."""
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count
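# Verified mini example: for max_number=30 the sieve over [2, 15) yields
# [2, 3, 5, 7, 11, 13] and the two-pointer sweep counts 10 semiprimes
# (4, 6, 9, 10, 14, 15, 21, 22, 25, 26).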
if __name__ == "__main__":
print(F"""{solution() = }""")
"""simple docstring"""
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """
    Build the quantum Fourier transform circuit on `number_of_qubits` qubits and
    simulate it with 10000 shots, returning the measurement counts.
    """
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits
    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10_000)

    return job.result().get_counts(quantum_circuit)
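# For reference: on n qubits the circuit above applies n Hadamard gates,
# n*(n-1)/2 controlled-phase rotations, and n//2 final swap gates.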
if __name__ == "__main__":
print(
f'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)

    def test_stable_diffusion_panorama_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_views_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default(self):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)
        expected_slice = np.array(
[
0.36968392,
0.27025372,
0.32446766,
0.28379387,
0.36363274,
0.30733347,
0.27100027,
0.27054125,
0.25536096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)
        expected_slice = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
[
0.18681869,
0.33907816,
0.5361276,
0.14432865,
-0.02856611,
-0.73941123,
0.23397987,
0.47322682,
-0.37823164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
[
0.18539645,
0.33987248,
0.5378559,
0.14437142,
-0.02455261,
-0.7338317,
0.23990755,
0.47356272,
-0.3786505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
        callback_fn.has_been_called = False
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        inputs = self.get_inputs()
        _ = pipe(**inputs)
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
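# Minimal usage sketch mirroring the slow tests above (checkpoint name taken from them):
#     pipe = StableDiffusionPanoramaPipeline.from_pretrained(
#         "stabilityai/stable-diffusion-2-base", safety_checker=None
#     )
#     image = pipe("a photo of the dolomites", num_inference_steps=3).images[0]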
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    """
    Depth estimation pipeline: predicts the depth map of an image.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images, **kwargs):
        """
        Predict the depth map of the image(s) passed as inputs.
        """
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
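# Usage sketch via the high-level API (the checkpoint name is an assumption; any
# depth-estimation model works):
#     from transformers import pipeline
#     depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#     result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#     result["depth"].save("depth.png")  # PIL image of the normalized depth map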
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    """The output of [`TransformerTemporalModel`]."""

    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    """A Transformer model for video-like data, attending across the frame axis."""

    @register_to_config
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, activation_fn: str = "geglu", norm_elementwise_affine: bool = True, double_self_attention: bool = True):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.in_channels = in_channels
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)
        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, attention_bias=attention_bias, double_self_attention=double_self_attention, norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )
        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, class_labels=None, num_frames=1, cross_attention_kwargs=None, return_dict: bool = True):
        # 1. Input
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames
        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)
        hidden_states = self.proj_in(hidden_states)
        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels,
            )
        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)
        output = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=output)
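# Shape-flow sketch: an input of shape (batch * num_frames, C, H, W) is regrouped so the
# transformer blocks see sequences of length num_frames with an effective batch of
# batch * H * W, i.e. attention runs across frames at each spatial location; the output
# is reshaped back to (batch * num_frames, C, H, W) before the residual addition.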
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 5_12,
"facebook/dpr-ctx_encoder-multiset-base": 5_12,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 5_12,
"facebook/dpr-question_encoder-multiset-base": 5_12,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 5_12,
"facebook/dpr-reader-multiset-base": 5_12,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
_A = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class _lowerCAmelCase :
    # Parameter and variable names restored from the references in the method body and docstring.
    def __call__( self , questions , titles = None , texts = None , padding = False , truncation = False , max_length = None , return_tensors = None , return_attention_mask = None , **kwargs , ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions , text_pair , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        titles = titles if not isinstance(titles , str ) else [titles]
        texts = texts if not isinstance(texts , str ) else [texts]
        n_passages = len(titles )
        questions = questions if not isinstance(questions , str ) else [questions] * n_passages
        assert len(titles ) == len(
            texts ), f"""There should be as many titles as texts but got {len(titles )} titles and {len(texts )} texts."""
        encoded_question_and_titles = super().__call__(questions , titles , padding=False , truncation=False )["input_ids"]
        encoded_texts = super().__call__(texts , add_special_tokens=False , padding=False , truncation=False )["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles , encoded_texts )
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                # Attend to every token that is not padding.
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs , padding=padding , max_length=max_length , return_tensors=return_tensors )
    def decode_best_spans( self , reader_input , reader_output , num_spans = 16 , max_answer_length = 64 , num_spans_per_passage = 4 , ) -> List[DPRSpanPrediction]:
        """Rank passages by relevance, then extract the best answer spans from each (method name restored from the docstring above)."""
        input_ids = reader_input["input_ids"]
        start_logits , end_logits , relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits )
        sorted_docs = sorted(range(n_passages ) , reverse=True , key=relevance_logits.__getitem__ )
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id , 2 ) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id )
            else:
                sequence_len = len(sequence_ids )
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=max_answer_length , top_spans=num_spans_per_passage , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=doc_id , start_index=start_index , end_index=end_index , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
            if len(nbest_spans_predictions ) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans( self , start_logits , end_logits , max_answer_length , top_spans , ) -> List[DPRSpanPrediction]:
        """Find the `top_spans` highest-scoring, non-overlapping spans of at most `max_answer_length` tokens (name restored from the call site above)."""
        scores = []
        for start_index, start_score in enumerate(start_logits ):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        scores = sorted(scores , key=lambda x : x[1] , reverse=True )
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"""Wrong span indices: [{start_index}:{end_index}]"""
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"""Span is too long: {length} > {max_answer_length}"""
            # Skip spans that overlap an already-chosen span.
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index) )
            if len(chosen_span_intervals ) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class _lowerCAmelCase ( __a , __a ):
    # Standard tokenizer class attributes; names follow the usual transformers convention.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = DPRReaderTokenizer
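# Usage sketch (added for illustration; assumes the public `DPRReader` and
# `DPRReaderTokenizer` classes from `transformers`, which this mixin backs).
# It shows how `__call__` packs (question, title, text) triples into
# `[CLS] question [SEP] title [SEP] text` rows, and how `decode_best_spans`
# turns reader logits back into ranked answer spans.
if __name__ == "__main__":
    from transformers import DPRReader, DPRReaderTokenizer

    tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
    model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
    encoded_inputs = tokenizer(
        questions="What is love?",
        titles="Haddaway",
        texts="'What Is Love' is a song recorded by the artist Haddaway",
        return_tensors="pt",
    )
    outputs = model(**encoded_inputs)
    # Highest-scoring answer span across the (single) passage.
    print(tokenizer.decode_best_spans(encoded_inputs, outputs)[0].text)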
| 279
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class _lowerCAmelCase ( __a ):
    model_type = '''transfo-xl'''
    keys_to_ignore_at_inference = ['''mems''']
    attribute_map = {
'''n_token''': '''vocab_size''',
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
    # Parameter names reconstructed from the attribute reads below (the original
    # signature repeated one placeholder name, which is not valid Python).
    def __init__( self , vocab_size=267_735 , cutoffs=[20_000, 40_000, 200_000] , d_model=1_024 , d_embed=1_024 , n_head=16 , d_head=64 , d_inner=4_096 , div_val=4 , pre_lnorm=False , n_layer=18 , mem_len=1_600 , clamp_len=1_000 , same_length=True , proj_share_all_but_first=True , attn_type=0 , sample_softmax=-1 , adaptive=True , dropout=0.1 , dropatt=0.0 , untie_r=True , init="normal" , init_range=0.01 , proj_init_std=0.01 , init_std=0.02 , layer_norm_epsilon=1e-5 , eos_token_id=0 , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs )
        # Tie the projection of every adaptive-softmax cluster except the head one.
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs )
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs )
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id , **kwargs )
@property
    def max_position_embeddings( self ) -> int:
        # Message copied from Transformer-XL documentation
        logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings( self , value ) -> None:
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 279
| 1
|
from __future__ import annotations
def bucket_sort( my_list: list ) -> list:
    """Distribute values into one bucket per integer offset from the minimum, then sort each bucket."""
    if len(my_list ) == 0:
        return []
    min_value , max_value = min(my_list ), max(my_list )
    bucket_count = int(max_value - min_value ) + 1
    buckets: list[list] = [[] for _ in range(bucket_count )]
    for i in my_list:
        buckets[int(i - min_value )].append(i )
    return [v for bucket in buckets for v in sorted(bucket )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 324
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class snake_case__ ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
    # Attribute names follow the PipelineTesterMixin convention.
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ) -> dict:
torch.manual_seed(0 )
__magic_name__ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
__magic_name__ : Union[str, Any] = DDIMScheduler()
torch.manual_seed(0 )
__magic_name__ : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
__magic_name__ : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
__magic_name__ : int = CLIPTextModel(lowerCAmelCase__ )
__magic_name__ : Any = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__magic_name__ : Any = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
    def get_dummy_inputs( self , device , seed=0 ) -> dict:
        generator = torch.manual_seed(seed )
__magic_name__ : Tuple = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
# Setting height and width to None to prevent OOMs on CPU.
"""height""": None,
"""width""": None,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
    def test_stable_diffusion_panorama_default_case( self ) -> None:
__magic_name__ : str = """cpu""" # ensure determinism for the device-dependent torch.Generator
__magic_name__ : Union[str, Any] = self.get_dummy_components()
__magic_name__ : List[str] = StableDiffusionPanoramaPipeline(**lowerCAmelCase__ )
__magic_name__ : List[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__magic_name__ : Dict = self.get_dummy_inputs(lowerCAmelCase__ )
__magic_name__ : Tuple = sd_pipe(**lowerCAmelCase__ ).images
__magic_name__ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__magic_name__ : Any = np.array([0.6_1_8_6, 0.5_3_7_4, 0.4_9_1_5, 0.4_1_3_5, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_7, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_batch_consistent( self ) -> None:
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
    def test_inference_batch_single_identical( self ) -> None:
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5e-3 )
    def test_stable_diffusion_panorama_negative_prompt( self ) -> None:
__magic_name__ : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
__magic_name__ : Tuple = self.get_dummy_components()
__magic_name__ : Dict = StableDiffusionPanoramaPipeline(**lowerCAmelCase__ )
__magic_name__ : int = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = """french fries"""
__magic_name__ : int = sd_pipe(**lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ )
__magic_name__ : Dict = output.images
__magic_name__ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__magic_name__ : Dict = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_stable_diffusion_panorama_views_batch( self ) -> None:
__magic_name__ : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
__magic_name__ : Any = self.get_dummy_components()
__magic_name__ : Tuple = StableDiffusionPanoramaPipeline(**lowerCAmelCase__ )
__magic_name__ : str = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ )
__magic_name__ : List[Any] = sd_pipe(**lowerCAmelCase__ , view_batch_size=2 )
__magic_name__ : List[Any] = output.images
__magic_name__ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__magic_name__ : List[Any] = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_stable_diffusion_panorama_euler( self ) -> None:
__magic_name__ : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
__magic_name__ : str = self.get_dummy_components()
__magic_name__ : int = EulerAncestralDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" )
__magic_name__ : str = StableDiffusionPanoramaPipeline(**lowerCAmelCase__ )
__magic_name__ : str = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__magic_name__ : Dict = self.get_dummy_inputs(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = sd_pipe(**lowerCAmelCase__ ).images
__magic_name__ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__magic_name__ : int = np.array([0.4_0_2_4, 0.6_5_1_0, 0.4_9_0_1, 0.5_3_7_8, 0.5_8_1_3, 0.5_6_2_2, 0.4_7_9_5, 0.4_4_6_7, 0.4_9_5_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_stable_diffusion_panorama_pndm( self ) -> None:
__magic_name__ : Dict = """cpu""" # ensure determinism for the device-dependent torch.Generator
__magic_name__ : Tuple = self.get_dummy_components()
__magic_name__ : int = PNDMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , skip_prk_steps=lowerCAmelCase__ )
__magic_name__ : List[Any] = StableDiffusionPanoramaPipeline(**lowerCAmelCase__ )
__magic_name__ : Dict = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__magic_name__ : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
__magic_name__ : Dict = sd_pipe(**lowerCAmelCase__ ).images
__magic_name__ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__magic_name__ : str = np.array([0.6_3_9_1, 0.6_2_9_1, 0.4_8_6_1, 0.5_1_3_4, 0.5_5_5_2, 0.4_5_7_8, 0.5_0_3_2, 0.5_0_2_3, 0.4_5_3_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
    def tearDown( self ) -> None:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs( self , seed=0 ) -> dict:
        generator = torch.manual_seed(seed )
__magic_name__ : str = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
    def test_stable_diffusion_panorama_default( self ) -> None:
__magic_name__ : int = """stabilityai/stable-diffusion-2-base"""
__magic_name__ : Optional[int] = DDIMScheduler.from_pretrained(lowerCAmelCase__ , subfolder="""scheduler""" )
__magic_name__ : Optional[int] = StableDiffusionPanoramaPipeline.from_pretrained(lowerCAmelCase__ , scheduler=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__magic_name__ : List[str] = self.get_inputs()
__magic_name__ : Tuple = pipe(**lowerCAmelCase__ ).images
__magic_name__ : Tuple = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
__magic_name__ : Tuple = np.array(
[
0.3_6_9_6_8_3_9_2,
0.2_7_0_2_5_3_7_2,
0.3_2_4_4_6_7_6_6,
0.2_8_3_7_9_3_8_7,
0.3_6_3_6_3_2_7_4,
0.3_0_7_3_3_3_4_7,
0.2_7_1_0_0_0_2_7,
0.2_7_0_5_4_1_2_5,
0.2_5_5_3_6_0_9_6,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
    def test_stable_diffusion_panorama_k_lms( self ) -> None:
__magic_name__ : Dict = StableDiffusionPanoramaPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-base""" , safety_checker=lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__magic_name__ : List[str] = self.get_inputs()
__magic_name__ : int = pipe(**lowerCAmelCase__ ).images
__magic_name__ : Optional[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
__magic_name__ : Any = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
    def test_stable_diffusion_panorama_intermediate_state( self ) -> None:
        number_of_steps = 0
        def callback_fn(step: int , timestep: int , latents: torch.FloatTensor ) -> None:
            callback_fn.has_been_called = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
                latents = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
[
0.1_8_6_8_1_8_6_9,
0.3_3_9_0_7_8_1_6,
0.5_3_6_1_2_7_6,
0.1_4_4_3_2_8_6_5,
-0.0_2_8_5_6_6_1_1,
-0.7_3_9_4_1_1_2_3,
0.2_3_3_9_7_9_8_7,
0.4_7_3_2_2_6_8_2,
-0.3_7_8_2_3_1_6_4,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
                latents = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
[
0.1_8_5_3_9_6_4_5,
0.3_3_9_8_7_2_4_8,
0.5_3_7_8_5_5_9,
0.1_4_4_3_7_1_4_2,
-0.0_2_4_5_5_2_6_1,
-0.7_3_3_8_3_1_7,
0.2_3_9_9_0_7_5_5,
0.4_7_3_5_6_2_7_2,
-0.3_7_8_6_5_0_5,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
        callback_fn.has_been_called = False
__magic_name__ : Union[str, Any] = """stabilityai/stable-diffusion-2-base"""
__magic_name__ : int = DDIMScheduler.from_pretrained(lowerCAmelCase__ , subfolder="""scheduler""" )
__magic_name__ : List[str] = StableDiffusionPanoramaPipeline.from_pretrained(lowerCAmelCase__ , scheduler=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ )
__magic_name__ : Optional[Any] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__magic_name__ : List[Any] = self.get_inputs()
pipe(**lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading( self ) -> None:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__magic_name__ : List[Any] = """stabilityai/stable-diffusion-2-base"""
__magic_name__ : List[str] = DDIMScheduler.from_pretrained(lowerCAmelCase__ , subfolder="""scheduler""" )
__magic_name__ : List[str] = StableDiffusionPanoramaPipeline.from_pretrained(lowerCAmelCase__ , scheduler=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ )
__magic_name__ : str = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__magic_name__ : List[Any] = self.get_inputs()
__magic_name__ : Union[str, Any] = pipe(**lowerCAmelCase__ )
__magic_name__ : List[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 10**9
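# Minimal end-to-end sketch of the pipeline exercised above (added for
# illustration; assumes a CUDA device and network access to the
# "stabilityai/stable-diffusion-2-base" checkpoint used by the slow tests).
if __name__ == "__main__":
    model_ckpt = "stabilityai/stable-diffusion-2-base"
    scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
    pipe = StableDiffusionPanoramaPipeline.from_pretrained(
        model_ckpt, scheduler=scheduler, safety_checker=None
    ).to("cuda")
    # Panorama denoising stitches overlapping views into one wide (512x2048) image.
    pipe(prompt="a photo of the dolomites").images[0].save("dolomites_panorama.png")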
| 324
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class _lowerCamelCase ( __lowerCamelCase ):
    model_type = '''gpt_neo'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
    # Parameter names reconstructed from the attribute reads below (the original
    # signature repeated one placeholder name, which is not valid Python).
    def __init__(self , vocab_size=5_02_57 , max_position_embeddings=20_48 , hidden_size=20_48 , num_layers=24 , attention_types=[[["global", "local"], 12]] , num_heads=16 , intermediate_size=None , window_size=2_56 , activation_function="gelu_new" , resid_dropout=0.0 , embed_dropout=0.0 , attention_dropout=0.0 , classifier_dropout=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=5_02_56 , eos_token_id=5_02_56 , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types )
        if len(self.attention_layers ) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                F"but is `len(config.attention_layers) = {len(self.attention_layers )}`, "
                F"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument." )
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
@staticmethod
    def expand_attention_types_params(attention_types ) -> list:
        """Expand [[patterns, repeat], ...] into a flat per-layer list (name restored from the call site above)."""
        attentions = []
        for item in attention_types:
            for _ in range(item[1] ):
                attentions.extend(item[0] )
        return attentions
def custom_unfold(input , dimension , size , step ):
    """Custom `torch.Tensor.unfold` implementation to enable the export to ONNX (names restored from the body's references)."""
    import torch
    shape = input.size()
    rank = len(shape )
    sizedim = shape[dimension]
    low_indices = torch.arange(0 , sizedim , step )
    min_length = torch.div(sizedim - size , step , rounding_mode="floor" ) + 1
    indices = torch.arange(size ) + low_indices[:min_length][:, None]
    s = [slice(None )] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0 , rank + 1 ) )
    perm.append(perm.pop(dimension + 1 ) )
    return sliced.permute(perm )
def custom_get_block_length_and_num_blocks(seq_length , window_size ):
    """Find the largest divisor of `seq_length` below `window_size` and the resulting number of blocks, in ONNX-exportable ops."""
    import torch
    candidates = torch.arange(1 , window_size )
    remainders = torch.remainder(seq_length , candidates )
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors )
    return largest_divisor, torch.div(seq_length , largest_divisor , rounding_mode="floor" )
# Base class restored from the otherwise-unused `OnnxConfigWithPast` import above.
class _lowerCamelCase ( OnnxConfigWithPast ):
    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="inputs" )
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs
    @property
    def num_attention_heads(self ) -> int:
        return self._config.num_heads
    def generate_dummy_inputs(self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
                batch , seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
    @property
    def default_onnx_opset(self ) -> int:
        return 13
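# Sanity check (added for illustration): `custom_unfold` above is meant to mirror
# `torch.Tensor.unfold` in ONNX-exportable ops, so the two should agree on a small
# example: windows of size 4, stride 2, along dimension 1 of a (2, 12) tensor.
if __name__ == "__main__":
    import torch

    x = torch.arange(24).reshape(2, 12)
    assert torch.equal(custom_unfold(x, dimension=1, size=4, step=2), x.unfold(1, 4, 2))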
| 707
|
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    """`parallel_backend` exposes its name and rejects unknown backends."""
    with parallel_backend("spark" ):
        assert ParallelBackendConfig.backend_name == "spark"
    lst = [1, 2, 3]
    with pytest.raises(ValueError ):
        with parallel_backend("unsupported backend" ):
            map_nested(add_one , lst , num_proc=2 )
    with pytest.raises(ValueError ):
        with parallel_backend("unsupported backend" ):
            map_nested(add_one , lst , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" , [2, -1] )
def test_parallel_backend_map_nested(num_proc ):
    """`map_nested` recurses through lists and dicts under the spark backend (distinct s1..s5 names restored from the five parallel assertions)."""
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}
    with parallel_backend("spark" ):
        assert map_nested(add_one , s1 , num_proc=num_proc ) == expected_map_nested_s1
        assert map_nested(add_one , s2 , num_proc=num_proc ) == expected_map_nested_s2
        assert map_nested(add_one , s3 , num_proc=num_proc ) == expected_map_nested_s3
        assert map_nested(add_one , s4 , num_proc=num_proc ) == expected_map_nested_s4
        assert map_nested(add_one , s5 , num_proc=num_proc ) == expected_map_nested_s5
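# Usage sketch (added; mirrors the tests above). It assumes a Spark-backed joblib
# is available, which is exactly what the `require_joblibspark` marker gates:
#
#     with parallel_backend("spark"):
#         assert map_nested(add_one, [1, 2, 3], num_proc=2) == [2, 3, 4]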
| 544
| 0
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """Circular FIFO queue backed by a fixed ring of doubly linked nodes (descriptive class name; `Node` restored from the constructor calls below)."""
    def __init__( self , initial_capacity: int = 6 ) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity )
    def create_linked_list( self , initial_capacity: int ) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1 , initial_capacity ):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # Close the ring.
        previous_node.next = self.front
        self.front.prev = previous_node
    def is_empty( self ) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )
    def first( self ):
        self.check_can_perform_operation()
        return self.front.data if self.front else None
    def enqueue( self , data ) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data
    def dequeue( self ):
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data
    def check_can_perform_operation( self ) -> None:
        if self.is_empty():
            raise Exception("""Empty Queue""" )
    def check_is_full( self ) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("""Full Queue""" )
class Node:
    def __init__( self ) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
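    # Small demo (added for illustration): the ring holds a fixed number of nodes
    # that are reused instead of allocated per enqueue.
    queue = CircularQueueLinkedList(initial_capacity=3)
    queue.enqueue("a")
    queue.enqueue("b")
    assert queue.first() == "a"
    assert queue.dequeue() == "a"
    assert queue.dequeue() == "b"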
| 644
|
"""simple docstring"""
from __future__ import annotations
from statistics import mean
def calculate_waitingtime( arrival_time: list[int] , burst_time: list[int] , no_of_processes: int ) -> list[int]:
    """Non-preemptive shortest-job-first: return the waiting time of each process."""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes ):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # While processes are not completed: a process whose arrival time has passed
    # and that still has remaining execution time is put into ready_process;
    # the shortest process in ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes ):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i )
        if len(ready_process ) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
def calculate_turnaroundtime( burst_time: list[int] , no_of_processes: int , waiting_time: list[int] ) -> list[int]:
    """Turnaround time is burst time plus waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes ):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print("[TEST CASE 01]")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
for i, process_id in enumerate(list(range(1, 5))):
print(
F"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
F"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
)
print(F"\nAverage waiting time = {mean(waiting_time):.5f}")
print(F"Average turnaround time = {mean(turn_around_time):.5f}")
| 644
| 1
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_focalnet'] = [
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 713
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
# Hook and variable names restored from the pytest API and the references below.
def pytest_configure(config):
    config.addinivalue_line(
        'markers' , 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested')
    config.addinivalue_line(
        'markers' , 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested')
    config.addinivalue_line('markers' , 'is_pipeline_test: mark test to run only when pipelines are tested')
    config.addinivalue_line('markers' , 'is_staging_test: mark test to run only in the staging environment')
    config.addinivalue_line('markers' , 'accelerate_tests: mark test that require accelerate')
    config.addinivalue_line('markers' , 'tool_tests: mark the tool tests that are run on their specific schedule')
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports)
def pytest_sessionfinish(session , exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag('IGNORE_RESULT')
OutputChecker = doctest.OutputChecker
class CustomOutputChecker(OutputChecker):
    def check_output(self , want , got , optionflags) -> bool:
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , want , got , optionflags)
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 444
| 0
|
def catalan_numbers( upper_limit: int ) -> list:
    """Return the Catalan numbers C(0) .. C(upper_limit) via dynamic programming."""
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0" )
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2 , upper_limit + 1 ):
        for j in range(i ):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
if __name__ == "__main__":
print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
print('\n*** Enter -1 at any time to quit ***')
print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
try:
while True:
            N = int(input().strip())
if N < 0:
print('\n********* Goodbye!! ************')
break
else:
print(f'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print('Try another upper limit for the sequence: ', end='')
except (NameError, ValueError):
print('\n********* Invalid input, goodbye! ************\n')
import doctest
doctest.testmod()
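    # Cross-check (added for illustration): the DP table agrees with the closed
    # form C(n) = binom(2n, n) // (n + 1).
    from math import comb

    assert catalan_numbers(10)[10] == comb(20, 10) // 11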
| 311
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class a__ ( unittest.TestCase ):
    # Test names are descriptive reconstructions; locals restored from the boolean
    # expressions below (`stepped`, `completed`, `reset`).
    def test_input_types(self ) -> None:
        input_ids = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(input_ids )
        self.assertTrue(isinstance(dc.token_ids, list ) )
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
    def test_check_illegal_input(self ) -> None:
        # We can't have constraints where one branch is a complete subset of another.
        input_ids = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(input_ids )  # fails here
    def test_example_progression(self ) -> None:
        input_ids = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(input_ids )
        stepped , completed , reset = dc.update(1 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        stepped , completed , reset = dc.update(2 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped , completed , reset = dc.update(3 )
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )
    def test_example_progression_unequal_three_mid_and_reset(self ) -> None:
        input_ids = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(input_ids )
        stepped , completed , reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        stepped , completed , reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped , completed , reset = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )
        stepped , completed , reset = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
        dc.reset()
        stepped , completed , reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )
        stepped , completed , reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped , completed , reset = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
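    # Usage sketch (added; illustrative only, `model`/`tokenizer` are hypothetical
    # objects): a DisjunctiveConstraint plugs into constrained beam search through
    # the `constraints` argument of `generate`:
    #
    #     flexible_phrases = tokenizer(["rain", "raining"], add_special_tokens=False).input_ids
    #     out = model.generate(input_ids, constraints=[DisjunctiveConstraint(flexible_phrases)], num_beams=4)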
| 507
| 0
|
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""",
}
class __magic_name__ ( __UpperCAmelCase):
'''simple docstring'''
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
"n_token": "vocab_size", # Backward compatibility
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
    # Parameter names reconstructed from the attribute reads below (the original
    # signature repeated one placeholder name, which is not valid Python).
    def __init__( self , vocab_size=3_20_00 , d_model=10_24 , n_layer=24 , n_head=16 , d_inner=40_96 , ff_activation="gelu" , untie_r=True , attn_type="bi" , initializer_range=0.02 , layer_norm_eps=1E-12 , dropout=0.1 , mem_len=5_12 , reuse_len=None , use_mems_eval=True , use_mems_train=False , bi_data=False , clamp_len=-1 , same_length=False , summary_type="last" , summary_use_proj=True , summary_activation="tanh" , summary_last_dropout=0.1 , start_n_top=5 , end_n_top=5 , pad_token_id=5 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0" )
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})" )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                '''The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'''
                ''' instead.''' , FutureWarning , )
            use_mems_eval = kwargs['''use_cache''']
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
@property
    def max_position_embeddings( self ) -> int:
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit." )
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings( self , value ) -> None:
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit." )
| 89
|
import requests
def send_slack_message(message_body , slack_url ):
    """POST a plain-text message to a Slack incoming-webhook URL (name restored from the call at the bottom of the file)."""
    headers = {'Content-Type': 'application/json'}
    response = requests.post(slack_url , json={'text': message_body} , headers=headers )
    if response.status_code != 200:
        error_message = (
            'Request to slack returned an error '
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(error_message )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
| 89
| 1
|
import requests
from bs4 import BeautifulSoup
def get_citation(base_url: str , params: dict ) -> str:
    """Scrape the citation count link from a Google Scholar lookup page (name restored from the call below)."""
    soup = BeautifulSoup(requests.get(base_url , params=params ).content , 'html.parser' )
    div = soup.find('div' , attrs={'class': 'gs_ri'} )
    anchors = div.find('div' , attrs={'class': 'gs_fl'} ).find_all('a' )
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
'''title''': (
'''Precisely geometry controlled microsupercapacitors for ultrahigh areal '''
'''capacitance, volumetric capacitance, and energy density'''
),
'''journal''': '''Chem. Mater.''',
'''volume''': 30,
'''pages''': '''3979-3990''',
'''year''': 2018,
'''hl''': '''en''',
}
print(get_citation('''https://scholar.google.com/scholar_lookup''', params=params))
| 84
|
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
_snake_case = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
_snake_case = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_snake_case = re.compile(r'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_snake_case = re.compile(r'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_snake_case = re.compile(r'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
_snake_case = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
('image-to-text', 'MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def camel_case_split(identifier: str ) -> list:
    """Split a camel-cased identifier into its words (name restored from the call sites below)."""
    matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)' , identifier )
    return [m.group(0 ) for m in matches]
def get_frameworks_table() -> pd.DataFrame:
    """Flag, for every model type, which of the PT/TF/Flax backends implement it (names restored from the references in the body)."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace('Config' , '' ): model_type for model_type, config in config_maping_names.items()
    }
    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool )
    tf_models = collections.defaultdict(bool )
    flax_models = collections.defaultdict(bool )
    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module ):
        lookup_dict = None
        if _re_tf_models.match(attr_name ) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name ).groups()[0]
        elif _re_flax_models.match(attr_name ) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name ).groups()[0]
        elif _re_pt_models.match(attr_name ) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name ).groups()[0]
        if lookup_dict is not None:
            while len(attr_name ) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = ''.join(camel_case_split(attr_name )[:-1] )
    all_models = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
    all_models = list(all_models )
    all_models.sort()
    data = {'model_type': all_models}
    data['pytorch'] = [pt_models[t] for t in all_models]
    data['tensorflow'] = [tf_models[t] for t in all_models]
    data['flax'] = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = 'AutoProcessor'
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = 'AutoTokenizer'
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = 'AutoFeatureExtractor'
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = 'AutoTokenizer'
    data['processor'] = [processors[t] for t in all_models]
    return pd.DataFrame(data )
def update_pipeline_and_auto_class_table(table ) -> dict:
    """Update the {model class: (pipeline tag, auto class)} table from the three auto modules (param name restored from `table.update` below)."""
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, F"""TF_{model_mapping}""", F"""FLAX_{model_mapping}"""]
        auto_classes = [auto_class, F"""TF_{auto_class}""", F"""Flax_{auto_class}"""]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules , auto_classes , model_mappings ):
            # The type of pipeline may not exist in this framework
            if not hasattr(module , mapping ):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module , mapping ).values():
                if isinstance(name , str ):
                    model_names.append(name )
                else:
                    model_names.extend(list(name ) )
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
    return table
def update_metadata(token , commit_sha ) -> None:
    """Rebuild the frameworks/pipeline-tags datasets and push them to the hub (names restored from the CLI wiring and body references)."""
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table )
    resolved_tags_file = hf_hub_download(
        'huggingface/transformers-metadata' , 'pipeline_tags.json' , repo_type='dataset' , token=token )
    tags_dataset = Dataset.from_json(resolved_tags_file )
    table = {
        tags_dataset[i]['model_class']: (tags_dataset[i]['pipeline_tag'], tags_dataset[i]['auto_class'])
        for i in range(len(tags_dataset ) )
    }
    table = update_pipeline_and_auto_class_table(table )
    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys() )
    tags_table = pd.DataFrame(
        {
            'model_class': model_classes,
            'pipeline_tag': [table[m][0] for m in model_classes],
            'auto_class': [table[m][1] for m in model_classes],
        } )
    tags_dataset = Dataset.from_pandas(tags_table )
    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir , 'frameworks.json' ) )
        tags_dataset.to_json(os.path.join(tmp_dir , 'pipeline_tags.json' ) )
        if commit_sha is not None:
            commit_message = (
                F"""Update with commit {commit_sha}\n\nSee: """
                F"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
            )
        else:
            commit_message = 'Update'
        upload_folder(
            repo_id='huggingface/transformers-metadata' , folder_path=tmp_dir , repo_type='dataset' , token=token , commit_message=commit_message , )
def check_pipeline_tags() -> None:
    """Fail if a supported pipeline task is missing from PIPELINE_TAGS_AND_AUTO_MODELS."""
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]['pt']
            if isinstance(model , (list, tuple) ):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key )
    if len(missing ) > 0:
        msg = ', '.join(missing )
        raise ValueError(
            'The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '
            F"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
    args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
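# A minimal sketch of how the two artifacts built above can be inspected locally
# (my own illustration; it assumes you run it from a transformers checkout where
# this script's imports resolve):
#
#     frameworks = get_frameworks_table()
#     print(frameworks.columns.tolist())
#     # expected columns: ['model_type', 'pytorch', 'tensorflow', 'flax', 'processor']
#     table = update_pipeline_and_auto_class_table({})
#     # `table` maps model class names to (pipeline_tag, auto_class) pairs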
| 383
| 0
|
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """Wrapper around `tqdm.auto.tqdm` that optionally displays the bar only on the main process."""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # Disable the bar on every process except the local main one.
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
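# A minimal usage sketch (assuming a script launched with `accelerate launch`):
# the wrapped iterable is passed positionally after the flag, so only the main
# process renders the progress bar and logs are not duplicated.
#
#     for step in tqdm(True, range(100), desc="steps"):
#         pass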
| 662
|
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Compute (base ** exponent) % modulo_value by recursive squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Return the last `digits` digits of the hyperexponentiation (tetration) of `base` by `height`."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
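# Sanity checks (my own illustrative values): the recursive square-and-multiply
# above agrees with Python's built-in three-argument pow.
#
#     assert _modexpt(3, 4, 100) == pow(3, 4, 100) == 81
#     assert _modexpt(1777, 1855, 10**8) == pow(1777, 1855, 10**8)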
| 662
| 1
|
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
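# Each returned entry is a (filled_sentence, probability, predicted_token) tuple;
# for the prompt above the top prediction is typically along the lines of
# ("Le camembert est délicieux :)", 0.49, " délicieux") -- illustrative values,
# the exact tokens and scores depend on the model weights.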
| 231
|
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        AutoTokenizer,
        TFAutoModelForCausalLM,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSpeechSeq2Seq,
        TFAutoModelForVision2Seq,
        TFBartForConditionalGeneration,
        TFLogitsProcessorList,
        TFMinLengthLogitsProcessor,
        tf_top_k_top_p_filtering,
    )
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    # tests whether the top_k_top_p_filtering function behaves as expected
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
                ],  # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
                ],  # cumulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )
        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )  # expected non filtered idx as noted above
        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above

        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)
        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )

        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
        framework_dependent_parameters = {
            "AutoModelForCausalLM": TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeq2Seq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeq2SeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVision2Seq,
            "LogitsProcessorList": TFLogitsProcessorList,
            "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
            "create_tensor_fn": tf.convert_to_tensor,
            "floats_tensor": floats_tensor,
            "return_tensors": "tf",
        }
    @slow
    def test_generate_tf_function_export_fixed_input_length(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    @require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)
    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        # Has PT equivalent: this test relies on random sampling
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        text = "Hello, my dog is cute and"
        tokens = tokenizer(text, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))
    def test_model_kwarg_encoder_signature_filtering(self):
        # Has PT equivalent: ample use of framework-specific code
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        # A fake model with a different signature: it accepts "foo" as an argument.
        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder
        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
| 231
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlm_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")
        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
_lowerCamelCase : List[Any] = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
_lowerCamelCase : str = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , lowercase_ )
self.assertListEqual(encoding.boxes , lowercase_ )
        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 704
|
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowercase__ = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
lowercase__ = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
lowercase__ = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
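# A minimal sketch of the sentence-level GLEU computation described in the docstring
# above (my own illustration with collections.Counter; the metric itself defers to
# nltk's corpus_gleu for the corpus-level aggregate):
#
#     from collections import Counter
#
#     def ngrams(tokens, n):
#         return Counter(tuple(tokens[i : i + n]) for i in range(len(tokens) - n + 1))
#
#     def sentence_gleu(hyp, ref, min_len=1, max_len=4):
#         hyp_counts = sum((ngrams(hyp, n) for n in range(min_len, max_len + 1)), Counter())
#         ref_counts = sum((ngrams(ref, n) for n in range(min_len, max_len + 1)), Counter())
#         matches = sum((hyp_counts & ref_counts).values())
#         precision = matches / max(sum(hyp_counts.values()), 1)
#         recall = matches / max(sum(ref_counts.values()), 1)
#         return min(precision, recall)  # GLEU is the minimum of recall and precision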
| 492
| 0
|
'''simple docstring'''
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """Return the minimum count of perfect squares that sum to `number`, via dynamic programming."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
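# Quick illustrative checks (my own examples): 12 = 4 + 4 + 4 needs three squares,
# while 13 = 4 + 9 needs only two.
#
#     assert minimum_squares_to_represent_a_number(12) == 3
#     assert minimum_squares_to_represent_a_number(13) == 2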
| 372
|
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Plain linear search over array[left:right]; returns the index of target, or -1."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search; returns the index of target in the sorted array, or -1."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search; returns the index of target in the sorted array, or -1."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at position: {result1}")
        print(f"Recursive search: {target} found at position: {result2}")
    else:
        print("Not found")
| 540
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    return name
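# Worked example of the renaming above (a hypothetical key of my own choosing,
# traced through the rules in rename_key): the timm-style name
#     "pretrained.model.blocks.0.attn.proj.weight"
# first becomes "dpt.encoder.blocks.0.attn.proj.weight", then the attention and
# block substitutions yield
#     "dpt.encoder.layer.0.attention.output.dense.weight"
# i.e. rename_key("pretrained.model.blocks.0.attn.proj.weight") returns that string.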
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    """Copy/paste/tweak the original DPT weights into the HuggingFace DPT structure."""
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 351
|
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        # Minimal stand-in so the references below resolve when vision isn't available.
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
@require_tf
@unittest.skip("""Object detection not implemented in TF""" )
    def test_small_model_tf(self):
        pass
@require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
] , )
        outputs = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] , threshold=0.0 , )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
[
{"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
        outputs = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] , )
@require_torch
@slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
        outputs = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] , )
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
@require_torch
@require_pytesseract
@slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)

        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"""score""": 0.9_993, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
{"""score""": 0.9_993, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
] , )
| 351
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
'''BEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BeitForImageClassification''',
'''BeitForMaskedImageModeling''',
'''BeitForSemanticSegmentation''',
'''BeitModel''',
'''BeitPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
'''FlaxBeitForImageClassification''',
'''FlaxBeitForMaskedImageModeling''',
'''FlaxBeitModel''',
'''FlaxBeitPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
UpperCamelCase_ : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
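# ---------------------------------------------------------------------------
# Usage note (illustrative, not part of the module): with the _LazyModule
# pattern above, submodules are only imported on first attribute access, so
#
#     from transformers.models.beit import BeitConfig
#
# stays cheap even when torch/flax are installed; modeling_beit is loaded
# lazily the first time e.g. BeitModel is actually touched.
# ---------------------------------------------------------------------------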
| 115
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
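# ---------------------------------------------------------------------------
# Illustrative note (not part of the module): unlike BERT, XLNet appends its
# special tokens at the END of the sequence, which is exactly what
# build_inputs_with_special_tokens above encodes:
#
#   single sequence: A + [SEP] + [CLS]
#   pair:            A + [SEP] + B + [SEP] + [CLS]
#
# e.g., assuming a tokenizer instance `tok`:
#   tok.build_inputs_with_special_tokens([10, 11])        # [10, 11, sep, cls]
#   tok.build_inputs_with_special_tokens([10, 11], [20])  # [10, 11, sep, 20, sep, cls]
# ---------------------------------------------------------------------------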
| 207
| 0
|
from typing import Dict

import numpy as np
import torch

from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map


def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein["aatype"].device
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
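# ---------------------------------------------------------------------------
# Illustrative sketch (assumption: a `protein` dict holding an "aatype" tensor
# of shape [num_res]): after make_atom14_masks, the new index tensors gather
# between the dense 14-atom and sparse 37-atom layouts, e.g.
#
#   protein = make_atom14_masks({"aatype": torch.zeros(5, dtype=torch.long)})
#   protein["residx_atom14_to_atom37"].shape  # -> torch.Size([5, 14])
#   protein["residx_atom37_to_atom14"].shape  # -> torch.Size([5, 37])
#   protein["atom14_atom_exists"].shape       # -> torch.Size([5, 14])
# ---------------------------------------------------------------------------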
| 703
|
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, vision_config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = TFVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def test_vision_text_dual_encoder_model(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @slow
    def test_pretrained_model_sustains_performance(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
        # just reinitialize it.
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
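# ---------------------------------------------------------------------------
# Illustrative note (not part of the tests): logits_per_image has shape
# (num_images, num_texts) and logits_per_text is its transpose, so for the
# single image and two Italian captions above, expected_logits is a (1, 2)
# array; the higher score for "una foto di un gatto" is consistent with the
# COCO cats photo used as input.
# ---------------------------------------------------------------------------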
| 489
| 0
|
"""simple docstring"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
lowerCamelCase = numpy.array([0, 0])
lowerCamelCase = numpy.array([0.5, 0.8_660_254])
lowerCamelCase = numpy.array([1, 0])
lowerCamelCase = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = initial_vectors
for _ in range(lowerCAmelCase__ ):
UpperCAmelCase_ = iteration_step(lowerCAmelCase__ )
return vectors
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = []
for i, start_vector in enumerate(vectors[:-1] ):
UpperCAmelCase_ = vectors[i + 1]
new_vectors.append(lowerCAmelCase__ )
UpperCAmelCase_ = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = numpy.radians(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = numpy.cos(lowerCAmelCase__ ), numpy.sin(lowerCAmelCase__ )
UpperCAmelCase_ = numpy.array(((c, -s), (s, c)) )
return numpy.dot(lowerCAmelCase__ , lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = plt.gca()
axes.set_aspect("equal" )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
UpperCAmelCase_ , UpperCAmelCase_ = zip(*lowerCAmelCase__ )
plt.plot(lowerCAmelCase__ , lowerCAmelCase__ )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
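# ---------------------------------------------------------------------------
# Quick sanity check (illustrative): each iteration replaces every segment by
# 4 shorter ones, so a path with n points grows to 4 * (n - 1) + 1 points:
#
#   len(iterate(INITIAL_VECTORS, 1))  # -> 13  (from the initial 4 points)
#   len(iterate(INITIAL_VECTORS, 2))  # -> 49
# ---------------------------------------------------------------------------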
| 82
|
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()

        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048

        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"

            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil_four_channels(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
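# ---------------------------------------------------------------------------
# Illustrative note on the expected_hidden_dim formula used above (not part of
# the tests): each flattened Pix2Struct patch stores
# patch_h * patch_w * num_channels pixel values plus 2 extra slots for the
# patch's row and column index, e.g. with the default 16x16 patches and 3
# channels:
#
#   16 * 16 * 3 + 2  # -> 770
# ---------------------------------------------------------------------------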
| 82
| 1
|
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(model, model_args, output_path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset)
    else:
        export(model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset)
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)
    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer("A sample prompt", padding="max_length", max_length=pipeline.tokenizer.model_max_length, truncation=True, return_tensors="pt")
    onnx_export(
        pipeline.text_encoder,
        model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)),
        output_path=output_path / "text_encoder" / "model.onnx",
        ordered_input_names=["input_ids"],
        output_names=["last_hidden_state", "pooler_output"],
        dynamic_axes={
            "input_ids": {0: "batch", 1: "sequence"},
        },
        opset=opset,
    )
    del pipeline.text_encoder

    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet,
        model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=unet_path,
        ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"],
        output_names=["out_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        },
        opset=opset,
        use_external_data_format=True,  # UNet weights exceed the 2GB protobuf limit, so split them out
    )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(unet, unet_model_path, save_as_external_data=True, all_tensors_to_one_file=True, location="weights.pb", convert_attribute=False)
    del pipeline.unet

    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder,
        model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_encoder" / "model.onnx",
        ordered_input_names=["sample", "return_dict"],
        output_names=["latent_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )

    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del pipeline.vae

    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker,
            model_args=(
                torch.randn(1, clip_num_channels, clip_image_size, clip_image_size).to(device=device, dtype=dtype),
                torch.randn(1, vae_sample_size, vae_sample_size, clip_num_channels).to(device=device, dtype=dtype),
            ),
            output_path=output_path / "safety_checker" / "model.onnx",
            ordered_input_names=["clip_input", "images"],
            output_names=["out_images", "has_nsfw_concepts"],
            dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            },
            opset=opset,
        )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None

    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"),
        vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"),
        text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"),
        tokenizer=pipeline.tokenizer,
        unet=OnnxRuntimeModel.from_pretrained(output_path / "unet"),
        scheduler=pipeline.scheduler,
        safety_checker=safety_checker,
        feature_extractor=feature_extractor,
        requires_safety_checker=safety_checker is not None,
    )

    onnx_pipeline.save_pretrained(output_path)
    print("ONNX pipeline saved to", output_path)

    del pipeline
    del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")
if __name__ == "__main__":
__lowerCAmelCase :str = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=14,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
__lowerCAmelCase :Dict = parser.parse_args()
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
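# ---------------------------------------------------------------------------
# Example invocation (illustrative; the model id is an assumption -- any
# Stable Diffusion diffusers checkpoint should work):
#
#   python convert_stable_diffusion_checkpoint_to_onnx.py \
#       --model_path runwayml/stable-diffusion-v1-5 \
#       --output_path ./sd_onnx \
#       --opset 14
#
# Add --fp16 on a CUDA machine to export half-precision weights instead.
# ---------------------------------------------------------------------------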
| 705
|
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])

        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
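# ---------------------------------------------------------------------------
# Illustrative alternative (not from the original file): rebuilding the whole
# Fibonacci list on every call is quadratic; a single sweep that only keeps
# the last two values finds the same index in O(1) memory:
#
# def fibonacci_digits_index_iterative(n: int) -> int:
#     a, b = 1, 2  # F(2), F(3)
#     index = 2
#     while len(str(a)) < n:
#         a, b = b, a + b
#         index += 1
#     return index
#
# fibonacci_digits_index_iterative(3)  # -> 12, since F(12) = 144 is the
#                                      #    first 3-digit Fibonacci number
# ---------------------------------------------------------------------------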
| 278
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layers we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
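# ---------------------------------------------------------------------------
# Illustrative usage (not part of the module): with the defaults above,
# num_layers=12 and num_sparse_encoder_layers=3 give encoder_sparse_step=4,
# i.e. every 4th encoder block is a Switch (mixture-of-experts) layer:
#
#   config = SwitchTransformersConfig()
#   config.encoder_sparse_step  # -> 4
#   config.decoder_sparse_step  # -> 4
# ---------------------------------------------------------------------------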
| 641
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
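# ---------------------------------------------------------------------------
# Illustrative usage sketch (the model id is an assumption; any ViLT checkpoint
# with a bundled processor should work the same way):
#
#   from PIL import Image
#   from transformers import ViltProcessor
#
#   processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#   inputs = processor(images=Image.open("cats.jpg"), text="How many cats?",
#                      return_tensors="pt")
#   # inputs now holds input_ids/attention_mask from the tokenizer plus
#   # pixel_values/pixel_mask from the image processor.
# ---------------------------------------------------------------------------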
| 171
| 0
|
import numpy as np


def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """Apply the ELU activation: x if x > 0, else alpha * (exp(x) - 1)."""
    return np.where(vector > 0, vector, (alpha * (np.exp(vector) - 1)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
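# ---------------------------------------------------------------------------
# Quick example (illustrative):
#
#   exponential_linear_unit(np.array([2.3, 0.6, -2.0, -3.8]), 0.3)
#   # -> array([ 2.3, 0.6, -0.25939942, -0.29328877]) approximately;
#   # positive entries pass through, negative ones saturate toward -alpha.
# ---------------------------------------------------------------------------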
| 720
|
import functools


def min_distance_up_bottom(word1: str, word2: str) -> int:
    """Top-down memoised edit (Levenshtein) distance between word1 and word2."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index overflows - delete all remaining from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index overflows - delete all remaining from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
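# ---------------------------------------------------------------------------
# Quick examples (illustrative):
#
#   min_distance_up_bottom("intention", "execution")            # -> 5
#   min_distance_up_bottom("zooicoarchaeologist", "zoologist")  # -> 10
#
# functools.cache memoises the (index1, index2) subproblems, so the runtime is
# O(len(word1) * len(word2)) instead of exponential.
# ---------------------------------------------------------------------------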
| 131
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """
        The coefficients should be given in order of degree, i.e.
        coefficients[0] is the constant term.
        """
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a: Polynomial) -> Polynomial:
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a: Polynomial) -> Polynomial:
        return self + polynomial_a * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += self.coefficients[i] * polynomial_a.coefficients[j]
        return Polynomial(self.degree + polynomial_a.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_a: object) -> bool:
        if not isinstance(polynomial_a, Polynomial):
            return False

        if self.degree != polynomial_a.degree:
            return False

        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False

        return True

    def __ne__(self, polynomial_a: object) -> bool:
        return not self.__eq__(polynomial_a)
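# ---------------------------------------------------------------------------
# Illustrative usage (not part of the original file):
#
#   p = Polynomial(2, [1, 0, 3])    # 3x^2 + 1
#   p.evaluate(2)                    # -> 13
#   print(p.derivative())            # -> 6x
#   print(p.integral(constant=5))    # -> 1.0x^3 + 1.0x + 5
# ---------------------------------------------------------------------------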
| 69
|
'''simple docstring'''
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def unet(hor: int):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")

    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 69
| 1
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vocab_size = 8
        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    @require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())

    @slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
            "who got the first nobel prize in physics",
            "when is the next deadpool movie being released",
            "which mode is used for short wave broadcast service",
            "who is the owner of reading football club",
            "when is the next scandal episode coming out",
            "when is the last time the philadelphia won the superbowl",
            "what is the most current adobe flash player version",
            "how many episodes are there in dragon ball z",
            "what is the first step in the evolution of the eye",
            "where is gall bladder situated in human body",
            "what is the main mineral in lithium batteries",
            "who is the president of usa right now",
            "where do the greasers live in the outsiders",
            "panda is a national animal of which country",
            "what is the name of manchester united stadium",
        ]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)

    @slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
            "who got the first nobel prize in physics",
            "when is the next deadpool movie being released",
            "which mode is used for short wave broadcast service",
            "who is the owner of reading football club",
            "when is the next scandal episode coming out",
            "when is the last time the philadelphia won the superbowl",
            "what is the most current adobe flash player version",
            "how many episodes are there in dragon ball z",
            "what is the first step in the evolution of the eye",
            "where is gall bladder situated in human body",
            "what is the main mineral in lithium batteries",
            "who is the president of usa right now",
            "where do the greasers live in the outsiders",
            "panda is a national animal of which country",
            "what is the name of manchester united stadium",
        ]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
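
# Editor's sketch (not part of the test suite; checkpoint download required):
# RagTokenizer bundles a DPR question-encoder tokenizer with a BART generator
# tokenizer; calling it encodes questions with the question-encoder vocabulary.
if __name__ == "__main__":
    from transformers import RagTokenizer

    tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
    batch = tokenizer(["who got the first nobel prize in physics"], return_tensors="pt")
    print(batch["input_ids"].shape)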
| 480
|
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"
        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )
        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"
        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )
        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )
        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
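
# Editor's sketch (not part of the test suite; checkpoint download required):
# the large-model test above, reduced to a plain pipeline call.
if __name__ == "__main__":
    vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
    preds = vqa(
        image="./tests/fixtures/tests_samples/COCO/000000039769.png",
        question="How many cats are there?",
        top_k=2,
    )
    print(preds)  # e.g. [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}]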
| 480
| 1
|
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}


class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @property
    def num_attention_heads(self) -> int:
        return self.num_heads

    @property
    def num_hidden_layers(self) -> int:
        return self.num_layers


class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
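
# Editor's sketch (not part of this module): the alias properties defined above
# in action; the default "gated-gelu" projection is remapped to "gelu_new".
if __name__ == "__main__":
    config = UMT5Config(d_model=256, num_layers=4, num_heads=4)
    assert config.hidden_size == 256
    assert config.num_hidden_layers == 4
    assert config.dense_act_fn == "gelu_new"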
| 419
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)
        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([12000])
        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        images = np.ones([3, 224, 224])
        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([12000])
        images = np.ones([3, 224, 224])
        inputs = processor(audio=audio, images=images)
        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
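
# Editor's sketch (not part of the test suite; checkpoint download required):
# joint audio + image preprocessing, mirroring test_processor above.
if __name__ == "__main__":
    processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
    inputs = processor(audio=np.ones([12000]), images=np.ones([3, 224, 224]))
    print(list(inputs.keys()))  # audio_values, audio_mask, pixel_values, pixel_mask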
| 419
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/nllb-large-en-ro': 1024,
    'facebook/nllb-200-distilled-600M': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({'additional_special_tokens': _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else 'eng_Latn'
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self, src_texts, src_lang="eng_Latn", tgt_texts=None, tgt_lang="fra_Latn", **kwargs
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str,
            pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        """Reset the special tokens to the target lang setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str,
            pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.'
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
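
# Editor's sketch (not part of this module; checkpoint download required):
# translate with NLLB by fixing the source language on the tokenizer and
# forcing the target language code as the first generated token.
if __name__ == "__main__":
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
    model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")
    encoded = tokenizer("The weather is nice today.", return_tensors="pt")
    generated = model.generate(**encoded, forced_bos_token_id=tokenizer.convert_tokens_to_ids("fra_Latn"))
    print(tokenizer.batch_decode(generated, skip_special_tokens=True))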
| 720
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def replace_key_with_offset(key, offset, original_name, new_name):
    """Replaces the key by subtracting the offset from the original layer number."""
    to_find = original_name.split('.')[0]
    key_list = key.split('.')
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key


def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith('network'):
            key = key.replace('network', 'poolformer.encoder')
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith('bias') and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find('proj')]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace('proj', 'projection')
            if key.endswith('bias'):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = 'poolformer.encoder.' + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'mlp.fc1', 'output.conv1')
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'mlp.fc2', 'output.conv2')
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'norm1', 'before_norm')
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'norm2', 'after_norm')
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'layer_scale_1', 'layer_scale_1')
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'layer_scale_2', 'layer_scale_2')
        if "head" in key:
            key = key.replace('head', 'classifier')
        new_state_dict[key] = value
    return new_state_dict


# We will verify the conversion on a COCO image
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = 'huggingface/label-files'
    size = model_name[-3:]
    config.num_labels = 1000
    filename = 'imagenet-1k-id2label.json'
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors='pt').pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_name',
        default='poolformer_s12',
        type=str,
        help='Name of the model you\'d like to convert.',
    )
    parser.add_argument(
        '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    args = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
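
# Editor's sketch (not part of the conversion script; assumes the already
# converted "sail/poolformer_s12" checkpoint on the Hub):
if __name__ == "__main__":
    from transformers import PoolFormerForImageClassification, PoolFormerImageProcessor

    hub_processor = PoolFormerImageProcessor.from_pretrained("sail/poolformer_s12")
    hub_model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
    pixels = hub_processor(images=prepare_img(), return_tensors="pt").pixel_values
    predicted = hub_model(pixels).logits.argmax(-1).item()
    print(hub_model.config.id2label[predicted])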
| 44
| 0
|
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            'num_train_timesteps': 1100,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='v_prediction')
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
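
# Editor's sketch (not part of the test suite; checkpoint download required):
# in practice the scheduler is swapped into an existing pipeline via from_config.
if __name__ == "__main__":
    from diffusers import DiffusionPipeline

    pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
    pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
    image = pipe("an astronaut riding a horse", num_inference_steps=30).images[0]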
| 282
|
'''simple docstring'''
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    """Strand sort: repeatedly pull an increasing "strand" out of the input and
    merge it into the solution list."""
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
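    # Editor's note (not in the original): strand_sort consumes its input list in
    # place, so pass a copy when the caller still needs the unsorted data.
    data = [5, 1, 4, 2, 3]
    assert strand_sort(data.copy()) == [1, 2, 3, 4, 5]
    assert data == [5, 1, 4, 2, 3]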
| 284
| 0
|
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding="""utf-8""") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
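
# Editor's sketch (not part of this module; checkpoint download required):
# MGP-STR tokenizes scene-text strings character by character.
if __name__ == "__main__":
    from transformers import MgpstrTokenizer

    tok = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
    print(tok("hello")["input_ids"])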
| 721
|
def match_pattern(input_string: str, pattern: str) -> bool:
    """Bottom-up dynamic programming matcher for patterns containing '.'
    (any single character) and '*' (zero or more of the preceding element)."""
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputing the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"
    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
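    # Editor's check (not in the original): this grammar ('.' and '*') is a subset
    # of Python's regex syntax, so re.fullmatch can serve as an oracle.
    import re

    assert match_pattern("aab", "c*a*b") == bool(re.fullmatch("c*a*b", "aab"))
    assert match_pattern("aaa", "aa") == bool(re.fullmatch("aa", "aaa"))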
| 175
| 0
|
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
| 26
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('''feature_extractor''')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''')
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''')
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''')
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
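
# Editor's sketch (not part of this module; checkpoint download required --
# "BAAI/AltCLIP" ships a processor of this shape):
if __name__ == "__main__":
    from PIL import Image
    from transformers import AltCLIPProcessor

    processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
    inputs = processor(text=["a photo of a cat"], images=Image.new("RGB", (224, 224)), return_tensors="pt")
    print(list(inputs.keys()))  # input_ids, attention_mask, pixel_values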
| 521
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        model = FlaxBertModel.from_pretrained('bert-base-cased')
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
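
# Editor's sketch (not part of the test suite; checkpoint download required):
# the slow test above in miniature, with a real tokenizer.
if __name__ == "__main__":
    from transformers import AutoTokenizer, FlaxBertModel

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    model = FlaxBertModel.from_pretrained("bert-base-cased")
    outputs = model(**tokenizer("hello world", return_tensors="np"))
    print(outputs.last_hidden_state.shape)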
| 630
|
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel

do_only_config = False
do_only_weights = True
do_only_renaming = False

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--repo_path""",
        default=None,
        type=str,
        required=True,
        help="""The config json file corresponding to the architecture.""",
    )
    parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
    args = parser.parse_args()

    config_parameters_to_change = {
        """image_size""": """sample_size""",
        """num_res_blocks""": """layers_per_block""",
        """block_channels""": """block_out_channels""",
        """down_blocks""": """down_block_types""",
        """up_blocks""": """up_block_types""",
        """downscale_freq_shift""": """freq_shift""",
        """resnet_num_groups""": """norm_num_groups""",
        """resnet_act_fn""": """act_fn""",
        """resnet_eps""": """norm_eps""",
        """num_head_channels""": """attention_head_dim""",
    }

    key_parameters_to_change = {
        """time_steps""": """time_proj""",
        """mid""": """mid_block""",
        """downsample_blocks""": """down_blocks""",
        """upsample_blocks""": """up_blocks""",
    }

    subfolder = """""" if has_file(args.repo_path, """config.json""") else """unet"""

    with open(os.path.join(args.repo_path, subfolder, """config.json"""), """r""", encoding="""utf-8""") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, """config.json"""):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if """ldm-text2im-large-256""" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]
        config["""down_block_types"""] = [k.replace("""UNetRes""", """""") for k in config["""down_block_types"""]]
        config["""up_block_types"""] = [k.replace("""UNetRes""", """""") for k in config["""up_block_types"""]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, """diffusion_pytorch_model.bin"""))
        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(""".op.bias""") or param_key.endswith(""".op.weight"""):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(""".""")[0] == key:
                    new_state_dict[""".""".join([new_key] + param_key.split(""".""")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value
        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 630
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = 'timm_backbone'

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
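
# Editor's sketch (not part of this module; requires the `timm` package and a
# weight download): the config feeds TimmBackbone, which exposes feature maps
# at the requested out_indices.
if __name__ == "__main__":
    from transformers import TimmBackbone, TimmBackboneConfig

    config = TimmBackboneConfig(backbone="resnet18", out_indices=(1, 2, 3, 4))
    backbone = TimmBackbone(config)
    print(config.out_indices)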
| 20
|
'''simple docstring'''
from __future__ import annotations
from random import random
class Node:
    """Treap's node: holds a value and a randomly drawn heap priority."""

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat(
                {f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1
            )

    def __str__(self) -> str:
        value = str(self.value) + ' '
        left = str(self.left or '')
        right = str(self.right or '')
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap into two: keys <= value and keys > value."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps where every key in `left` is <= every key in `right`."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=',')
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print('Unknown command')
    return root


def main() -> None:
    root = None
    print(
        'enter numbers to create a tree, + value to add value into treap, '
        '- value to erase all nodes with value. \'q\' to quit. '
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print('good by!')
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
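# Quick usage sketch (illustrative, not part of the original module):
#
# root = None
# for v in (5, 3, 8, 1):
#     root = insert(root, v)
# inorder(root)          # prints: 1,3,5,8,
# root = erase(root, 3)
# inorder(root)          # prints: 1,5,8,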
| 399
| 0
|
"""Count the ways to make a total of `pence` from standard British coins."""


def solution(pence: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
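# Worked example (illustrative, with a reduced coin set): for 5 pence and coins
# [1, 2, 5] there are 4 ways: 5, 2+2+1, 2+1+1+1, 1+1+1+1+1. The DP table builds
# this up one coin at a time:
#
# ways = [1, 0, 0, 0, 0, 0]           # only the empty sum makes 0 pence
# after coin 1: [1, 1, 1, 1, 1, 1]
# after coin 2: [1, 1, 2, 2, 3, 3]
# after coin 5: [1, 1, 2, 2, 3, 4]    # ways[5] == 4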
| 720
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class _UpperCamelCase ( SCREAMING_SNAKE_CASE):
'''simple docstring'''
def __init__( self , a_ , a_=1_3 , a_=7 , a_=True , a_=True , a_=False , a_=True , a_=9_9 , a_=3_2 , a_=5 , a_=4 , a_=3_7 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_1_2 , a_=1_6 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ) -> Any:
lowercase : List[str] = parent
lowercase : str = batch_size
lowercase : int = seq_length
lowercase : Any = is_training
lowercase : List[Any] = use_input_mask
lowercase : str = use_token_type_ids
lowercase : List[str] = use_labels
lowercase : Optional[Any] = vocab_size
lowercase : List[Any] = hidden_size
lowercase : List[Any] = num_hidden_layers
lowercase : str = num_attention_heads
lowercase : Union[str, Any] = intermediate_size
lowercase : Union[str, Any] = hidden_act
lowercase : Optional[Any] = hidden_dropout_prob
lowercase : Union[str, Any] = attention_probs_dropout_prob
lowercase : List[str] = max_position_embeddings
lowercase : Tuple = type_vocab_size
lowercase : Dict = type_sequence_label_size
lowercase : Optional[Any] = initializer_range
lowercase : int = num_labels
lowercase : int = num_choices
lowercase : Tuple = scope
def a__ ( self ) -> Dict:
lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Union[str, Any] = None
if self.use_input_mask:
lowercase : int = random_attention_mask([self.batch_size, self.seq_length] )
lowercase : str = None
lowercase : str = None
lowercase : Optional[int] = None
if self.use_labels:
lowercase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowercase : Dict = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self ) -> Any:
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def a__ ( self , a_ , a_ , a_ , a_ , a_ , a_ ) -> List[Any]:
lowercase : Tuple = DistilBertModel(config=a_ )
model.to(a_ )
model.eval()
lowercase : Optional[Any] = model(a_ , a_ )
lowercase : List[Any] = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , a_ , a_ , a_ , a_ , a_ , a_ ) -> str:
lowercase : List[Any] = DistilBertForMaskedLM(config=a_ )
model.to(a_ )
model.eval()
lowercase : Dict = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self , a_ , a_ , a_ , a_ , a_ , a_ ) -> int:
lowercase : Optional[Any] = DistilBertForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
lowercase : List[Any] = model(
a_ , attention_mask=a_ , start_positions=a_ , end_positions=a_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self , a_ , a_ , a_ , a_ , a_ , a_ ) -> List[str]:
lowercase : Union[str, Any] = self.num_labels
lowercase : Optional[int] = DistilBertForSequenceClassification(a_ )
model.to(a_ )
model.eval()
lowercase : Dict = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self , a_ , a_ , a_ , a_ , a_ , a_ ) -> List[str]:
lowercase : List[Any] = self.num_labels
lowercase : Optional[Any] = DistilBertForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
lowercase : Optional[Any] = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self , a_ , a_ , a_ , a_ , a_ , a_ ) -> Optional[int]:
lowercase : Optional[int] = self.num_choices
lowercase : Tuple = DistilBertForMultipleChoice(config=a_ )
model.to(a_ )
model.eval()
lowercase : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : Tuple = model(
a_ , attention_mask=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ ( self ) -> Tuple:
lowercase : Any = self.prepare_config_and_inputs()
((lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase)) : int = config_and_inputs
lowercase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase):
'''simple docstring'''
_snake_case = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
_snake_case = (
{
'''feature-extraction''': DistilBertModel,
'''fill-mask''': DistilBertForMaskedLM,
'''question-answering''': DistilBertForQuestionAnswering,
'''text-classification''': DistilBertForSequenceClassification,
'''token-classification''': DistilBertForTokenClassification,
'''zero-shot''': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_snake_case = True
_snake_case = True
_snake_case = True
_snake_case = True
def a__ ( self ) -> int:
lowercase : str = DistilBertModelTester(self )
lowercase : Optional[Any] = ConfigTester(self , config_class=a_ , dim=3_7 )
def a__ ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def a__ ( self ) -> List[Any]:
lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a_ )
def a__ ( self ) -> Tuple:
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a_ )
def a__ ( self ) -> Union[str, Any]:
lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a_ )
def a__ ( self ) -> Optional[Any]:
lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a_ )
def a__ ( self ) -> Optional[Any]:
lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a_ )
def a__ ( self ) -> List[str]:
lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a_ )
@slow
def a__ ( self ) -> List[Any]:
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Union[str, Any] = DistilBertModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@slow
@require_torch_gpu
def a__ ( self ) -> Tuple:
lowercase , lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
lowercase : Any = True
lowercase : List[str] = model_class(config=a_ )
lowercase : Optional[int] = self._prepare_for_class(a_ , a_ )
lowercase : Union[str, Any] = torch.jit.trace(
a_ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a_ , os.path.join(a_ , "traced_model.pt" ) )
lowercase : str = torch.jit.load(os.path.join(a_ , "traced_model.pt" ) , map_location=a_ )
loaded(inputs_dict["input_ids"].to(a_ ) , inputs_dict["attention_mask"].to(a_ ) )
@require_torch
class _UpperCamelCase ( unittest.TestCase):
'''simple docstring'''
@slow
def a__ ( self ) -> List[str]:
lowercase : Dict = DistilBertModel.from_pretrained("distilbert-base-uncased" )
lowercase : Union[str, Any] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowercase : str = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase : Union[str, Any] = model(a_ , attention_mask=a_ )[0]
lowercase : List[str] = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , a_ )
lowercase : Optional[int] = torch.tensor(
[[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a_ , atol=1e-4 ) )
| 425
| 0
|
"""simple docstring"""
import numpy
# List of input, output pairs
SCREAMING_SNAKE_CASE__ : Optional[Any] =(
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
SCREAMING_SNAKE_CASE__ : str =(((515, 22, 13), 555), ((61, 35, 49), 150))
SCREAMING_SNAKE_CASE__ : int =[2, 4, 1, 5]
SCREAMING_SNAKE_CASE__ : Any =len(train_data)
SCREAMING_SNAKE_CASE__ : List[Any] =0.009
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="train" ) ->List[str]:
return calculate_hypothesis_value(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) - output(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->Tuple:
_lowerCamelCase : int = 0
for i in range(len(SCREAMING_SNAKE_CASE_ ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->Union[str, Any]:
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->Optional[Any]:
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=m ) ->List[str]:
_lowerCamelCase : Tuple = 0
for i in range(SCREAMING_SNAKE_CASE_ ):
if index == -1:
summation_value += _error(SCREAMING_SNAKE_CASE_ )
else:
summation_value += _error(SCREAMING_SNAKE_CASE_ ) * train_data[i][0][index]
return summation_value
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->List[str]:
_lowerCamelCase : Optional[Any] = summation_of_cost_derivative(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) / m
return cost_derivative_value
def UpperCamelCase ( ) ->Optional[Any]:
global parameter_vector
# Tune these values to set a tolerance value for predicted output
_lowerCamelCase : Dict = 0.000002
_lowerCamelCase : List[str] = 0
_lowerCamelCase : Union[str, Any] = 0
while True:
j += 1
_lowerCamelCase : str = [0, 0, 0, 0]
for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) ):
_lowerCamelCase : Optional[int] = get_cost_derivative(i - 1 )
_lowerCamelCase : Optional[Any] = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ , rtol=SCREAMING_SNAKE_CASE_ , ):
break
_lowerCamelCase : List[str] = temp_parameter_vector
print(('''Number of iterations:''', j) )
def UpperCamelCase ( ) ->Optional[Any]:
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
print(('''Actual output value:''', output(SCREAMING_SNAKE_CASE_ , '''test''' )) )
print(('''Hypothesis output:''', calculate_hypothesis_value(SCREAMING_SNAKE_CASE_ , '''test''' )) )
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
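# Vectorized equivalent of one update step above (an illustrative sketch, not part
# of the original script): with a bias column prepended to the inputs, the whole
# parameter vector moves along the negative mean gradient in one expression.
import numpy as np

X = np.array([[1, 5, 2, 3], [1, 6, 5, 9]])  # two training rows, bias column first
y = np.array([15, 25])
theta = np.array([2.0, 4.0, 1.0, 5.0])      # same initial guess as parameter_vector
error = X @ theta - y                       # hypothesis minus actual output
theta = theta - 0.009 * (X.T @ error) / len(y)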
| 434
|
"""simple docstring"""
from argparse import ArgumentParser, Namespace

from ..utils import logging
from . import BaseTransformersCLICommand


def convert_command_factory(args: Namespace):
    """Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint."""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""


class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            if "ckpt" in self._tf_checkpoint.lower():
                tf_checkpoint = self._tf_checkpoint
                tf_dataset_file = ""
            else:
                tf_dataset_file = self._tf_checkpoint
                tf_checkpoint = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                tf_checkpoint, self._config, self._pytorch_dump_output, tf_dataset_file
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]"
            )
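# Example invocation (illustrative; the file paths are made up):
#
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint ./bert_model.ckpt \
#       --config ./bert_config.json \
#       --pytorch_dump_output ./pytorch_model.bin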
| 434
| 1
|
'''simple docstring'''
__all__ = [
'DownloadConfig',
'DownloadManager',
'DownloadMode',
'StreamingDownloadManager',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 720
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class UpperCamelCase__ :
"""simple docstring"""
UpperCAmelCase__ = BlenderbotSmallConfig
UpperCAmelCase__ = {}
UpperCAmelCase__ = 'gelu'
def __init__( self : Optional[Any] , __A : Optional[Any] , __A : Optional[Any]=1_3 , __A : List[str]=7 , __A : List[str]=True , __A : Tuple=False , __A : str=9_9 , __A : Union[str, Any]=3_2 , __A : str=2 , __A : Optional[Any]=4 , __A : Optional[int]=3_7 , __A : str=0.1 , __A : str=0.1 , __A : int=2_0 , __A : Any=2 , __A : str=1 , __A : Union[str, Any]=0 , ):
"""simple docstring"""
_lowercase = parent
_lowercase = batch_size
_lowercase = seq_length
_lowercase = is_training
_lowercase = use_labels
_lowercase = vocab_size
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = intermediate_size
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = max_position_embeddings
_lowercase = eos_token_id
_lowercase = pad_token_id
_lowercase = bos_token_id
def snake_case ( self : List[str] ):
"""simple docstring"""
_lowercase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_lowercase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_lowercase = tf.concat([input_ids, eos_tensor] , axis=1 )
_lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_lowercase = prepare_blenderbot_small_inputs_dict(__A , __A , __A )
return config, inputs_dict
def snake_case ( self : int , __A : Tuple , __A : List[str] ):
"""simple docstring"""
_lowercase = TFBlenderbotSmallModel(config=__A ).get_decoder()
_lowercase = inputs_dict["input_ids"]
_lowercase = input_ids[:1, :]
_lowercase = inputs_dict["attention_mask"][:1, :]
_lowercase = inputs_dict["head_mask"]
_lowercase = 1
# first forward pass
_lowercase = model(__A , attention_mask=__A , head_mask=__A , use_cache=__A )
_lowercase , _lowercase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_lowercase = ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowercase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_lowercase = tf.concat([input_ids, next_tokens] , axis=-1 )
_lowercase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_lowercase = model(__A , attention_mask=__A )[0]
_lowercase = model(__A , attention_mask=__A , past_key_values=__A )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_lowercase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_lowercase = output_from_no_past[:, -3:, random_slice_idx]
_lowercase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__A , __A , rtol=1e-3 )
def A__ ( A_ , A_ , A_ , A_=None , A_=None , A_=None , A_=None , A_=None , ) -> Optional[Any]:
if attention_mask is None:
_lowercase = tf.cast(tf.math.not_equal(A_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_lowercase = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_lowercase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_lowercase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_lowercase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class UpperCamelCase__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
UpperCAmelCase__ = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
UpperCAmelCase__ = (
{
'conversational': TFBlenderbotSmallForConditionalGeneration,
'feature-extraction': TFBlenderbotSmallModel,
'summarization': TFBlenderbotSmallForConditionalGeneration,
'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
'translation': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def snake_case ( self : Optional[Any] ):
"""simple docstring"""
_lowercase = TFBlenderbotSmallModelTester(self )
_lowercase = ConfigTester(self , config_class=__A )
def snake_case ( self : Dict ):
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case ( self : Optional[int] ):
"""simple docstring"""
_lowercase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__A )
@require_tokenizers
@require_tf
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ = [
'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
' i\'m going to throw up.\nand why is that?'
]
UpperCAmelCase__ = 'facebook/blenderbot_small-90M'
@cached_property
def snake_case ( self : str ):
"""simple docstring"""
# use "old" tokenizer here because of bug when downloading new tokenizer
return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
@cached_property
def snake_case ( self : Optional[Any] ):
"""simple docstring"""
_lowercase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def snake_case ( self : str ):
"""simple docstring"""
_lowercase = self.tokenizer(self.src_text , return_tensors="tf" )
_lowercase = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__A , )
_lowercase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__A )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 602
| 0
|
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _UpperCamelCase :
'''simple docstring'''
@staticmethod
def __UpperCamelCase ( *a : str , **a : int ) -> str:
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =MODEL_FOR_OBJECT_DETECTION_MAPPING
def __UpperCamelCase ( self : Optional[Any] , a : str , a : Optional[Any] , a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = ObjectDetectionPipeline(model=a , image_processor=a )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __UpperCamelCase ( self : List[Any] , a : Optional[int] , a : Optional[int] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 )
self.assertGreater(len(a ) , 0 )
for detected_object in outputs:
self.assertEqual(
a , {
"score": ANY(a ),
"label": ANY(a ),
"box": {"xmin": ANY(a ), "ymin": ANY(a ), "xmax": ANY(a ), "ymax": ANY(a )},
} , )
import datasets
SCREAMING_SNAKE_CASE : Any = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
SCREAMING_SNAKE_CASE : Dict = [
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
]
SCREAMING_SNAKE_CASE : Tuple = object_detector(a , threshold=0.0 )
self.assertEqual(len(a ) , len(a ) )
for outputs in batch_outputs:
self.assertGreater(len(a ) , 0 )
for detected_object in outputs:
self.assertEqual(
a , {
"score": ANY(a ),
"label": ANY(a ),
"box": {"xmin": ANY(a ), "ymin": ANY(a ), "xmax": ANY(a ), "ymax": ANY(a )},
} , )
@require_tf
@unittest.skip("Object detection not implemented in TF" )
def __UpperCamelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
pass
@require_torch
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = "hf-internal-testing/tiny-detr-mobilenetsv3"
SCREAMING_SNAKE_CASE : Dict = AutoModelForObjectDetection.from_pretrained(a )
SCREAMING_SNAKE_CASE : Any = AutoFeatureExtractor.from_pretrained(a )
SCREAMING_SNAKE_CASE : Tuple = ObjectDetectionPipeline(model=a , feature_extractor=a )
SCREAMING_SNAKE_CASE : int = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
] , )
SCREAMING_SNAKE_CASE : Dict = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
] , )
@require_torch
@slow
def __UpperCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = "facebook/detr-resnet-50"
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForObjectDetection.from_pretrained(a )
SCREAMING_SNAKE_CASE : List[str] = AutoFeatureExtractor.from_pretrained(a )
SCREAMING_SNAKE_CASE : int = ObjectDetectionPipeline(model=a , feature_extractor=a )
SCREAMING_SNAKE_CASE : Union[str, Any] = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
SCREAMING_SNAKE_CASE : int = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __UpperCamelCase ( self : str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = "facebook/detr-resnet-50"
SCREAMING_SNAKE_CASE : Tuple = pipeline("object-detection" , model=a )
SCREAMING_SNAKE_CASE : Tuple = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
SCREAMING_SNAKE_CASE : str = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __UpperCamelCase ( self : str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = 0.9985
SCREAMING_SNAKE_CASE : int = "facebook/detr-resnet-50"
SCREAMING_SNAKE_CASE : List[str] = pipeline("object-detection" , model=a )
SCREAMING_SNAKE_CASE : str = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=a )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def __UpperCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = "Narsil/layoutlmv3-finetuned-funsd"
SCREAMING_SNAKE_CASE : Dict = 0.9993
SCREAMING_SNAKE_CASE : str = pipeline("object-detection" , model=a , threshold=a )
SCREAMING_SNAKE_CASE : List[Any] = object_detector(
"https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
] , )
| 25
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
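# Usage sketch (illustrative; the exact agents/tools entry point may differ across
# transformers versions, and "invoice.png" is a made-up file name):
#
# from PIL import Image
# tool = DocumentQuestionAnsweringTool()
# tool.setup()  # instantiates the Donut processor and model from default_checkpoint
# print(tool(document=Image.open("invoice.png"), question="What is the invoice total?"))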
| 429
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class a ( a_ ):
UpperCAmelCase_ : torch.FloatTensor
class a ( a_, a_ ):
@register_to_config
def __init__( self , _lowerCamelCase = 6_5_5_3_6 , _lowerCamelCase = None , _lowerCamelCase = 2 , _lowerCamelCase = 2 , _lowerCamelCase = 0 , _lowerCamelCase = "fourier" , _lowerCamelCase = True , _lowerCamelCase = False , _lowerCamelCase = 0.0 , _lowerCamelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , _lowerCamelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , _lowerCamelCase = "UNetMidBlock1D" , _lowerCamelCase = None , _lowerCamelCase = (3_2, 3_2, 6_4) , _lowerCamelCase = None , _lowerCamelCase = 8 , _lowerCamelCase = 1 , _lowerCamelCase = False , ):
super().__init__()
lowercase = sample_size
# time
if time_embedding_type == "fourier":
lowercase = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=_lowerCamelCase , log=_lowerCamelCase , flip_sin_to_cos=_lowerCamelCase )
lowercase = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
lowercase = Timesteps(
block_out_channels[0] , flip_sin_to_cos=_lowerCamelCase , downscale_freq_shift=_lowerCamelCase )
lowercase = block_out_channels[0]
if use_timestep_embedding:
lowercase = block_out_channels[0] * 4
lowercase = TimestepEmbedding(
in_channels=_lowerCamelCase , time_embed_dim=_lowerCamelCase , act_fn=_lowerCamelCase , out_dim=block_out_channels[0] , )
lowercase = nn.ModuleList([] )
lowercase = None
lowercase = nn.ModuleList([] )
lowercase = None
# down
lowercase = in_channels
for i, down_block_type in enumerate(_lowerCamelCase ):
lowercase = output_channel
lowercase = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
lowercase = i == len(_lowerCamelCase ) - 1
lowercase = get_down_block(
_lowerCamelCase , num_layers=_lowerCamelCase , in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(_lowerCamelCase )
# mid
lowercase = get_mid_block(
_lowerCamelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=_lowerCamelCase , add_downsample=_lowerCamelCase , )
# up
lowercase = list(reversed(_lowerCamelCase ) )
lowercase = reversed_block_out_channels[0]
if out_block_type is None:
lowercase = out_channels
else:
lowercase = block_out_channels[0]
for i, up_block_type in enumerate(_lowerCamelCase ):
lowercase = output_channel
lowercase = (
reversed_block_out_channels[i + 1] if i < len(_lowerCamelCase ) - 1 else final_upsample_channels
)
lowercase = i == len(_lowerCamelCase ) - 1
lowercase = get_up_block(
_lowerCamelCase , num_layers=_lowerCamelCase , in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(_lowerCamelCase )
lowercase = output_channel
# out
lowercase = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 3_2 )
lowercase = get_out_block(
out_block_type=_lowerCamelCase , num_groups_out=_lowerCamelCase , embed_dim=block_out_channels[0] , out_channels=_lowerCamelCase , act_fn=_lowerCamelCase , fc_dim=block_out_channels[-1] // 4 , )
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = True , ):
lowercase = timestep
if not torch.is_tensor(_lowerCamelCase ):
lowercase = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(_lowerCamelCase ) and len(timesteps.shape ) == 0:
lowercase = timesteps[None].to(sample.device )
lowercase = self.time_proj(_lowerCamelCase )
if self.config.use_timestep_embedding:
lowercase = self.time_mlp(_lowerCamelCase )
else:
lowercase = timestep_embed[..., None]
lowercase = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
lowercase = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
lowercase = ()
for downsample_block in self.down_blocks:
lowercase , lowercase = downsample_block(hidden_states=_lowerCamelCase , temb=_lowerCamelCase )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
lowercase = self.mid_block(_lowerCamelCase , _lowerCamelCase )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
lowercase = down_block_res_samples[-1:]
lowercase = down_block_res_samples[:-1]
lowercase = upsample_block(_lowerCamelCase , res_hidden_states_tuple=_lowerCamelCase , temb=_lowerCamelCase )
# 5. post-process
if self.out_block:
lowercase = self.out_block(_lowerCamelCase , _lowerCamelCase )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=_lowerCamelCase )
| 720
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_blip_2': [
'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Blip2Config',
'Blip2QFormerConfig',
'Blip2VisionConfig',
],
'processing_blip_2': ['Blip2Processor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_blip_2"] = [
'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Blip2Model',
'Blip2QFormerModel',
'Blip2PreTrainedModel',
'Blip2ForConditionalGeneration',
'Blip2VisionModel',
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 134
| 0
|
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
    # This defines a "chinese character" as anything in the CJK Unicode block:
    # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True
    return False


def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        char_code = ord(char)
        if not _is_chinese_char(char_code):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save positions of chinese subwords that start with ##, which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these models, we have to use the same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
'''--file_name''',
required=False,
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''',
required=False,
type=str,
default='''./resources/ltp''',
help='''resources for LTP tokenizer, usually a path''',
)
parser.add_argument(
'''--bert''',
required=False,
type=str,
default='''./resources/robert''',
help='''resources for Bert tokenizer''',
)
parser.add_argument(
'''--save_path''',
required=False,
type=str,
default='''./resources/ref.txt''',
help='''path to save res''',
)
args = parser.parse_args()
main(args)
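# Illustrative example (not part of the original script): add_sub_symbol marks
# BERT sub-tokens that continue an LTP-segmented whole word with the "##" prefix,
# which is exactly the reference format whole-word masking expects.
#
# tokens = ["[CLS]", "北", "京", "[SEP]"]
# add_sub_symbol(tokens, {"北京"})  # -> ["[CLS]", "北", "##京", "[SEP]"]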
| 9
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""],
"""configuration_data2vec_text""": [
"""DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecTextConfig""",
"""Data2VecTextOnnxConfig""",
],
"""configuration_data2vec_vision""": [
"""DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecVisionConfig""",
"""Data2VecVisionOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_data2vec_audio"] = [
"""DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecAudioForAudioFrameClassification""",
"""Data2VecAudioForCTC""",
"""Data2VecAudioForSequenceClassification""",
"""Data2VecAudioForXVector""",
"""Data2VecAudioModel""",
"""Data2VecAudioPreTrainedModel""",
]
_import_structure["modeling_data2vec_text"] = [
"""DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecTextForCausalLM""",
"""Data2VecTextForMaskedLM""",
"""Data2VecTextForMultipleChoice""",
"""Data2VecTextForQuestionAnswering""",
"""Data2VecTextForSequenceClassification""",
"""Data2VecTextForTokenClassification""",
"""Data2VecTextModel""",
"""Data2VecTextPreTrainedModel""",
]
_import_structure["modeling_data2vec_vision"] = [
"""DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecVisionForImageClassification""",
"""Data2VecVisionForMaskedImageModeling""",
"""Data2VecVisionForSemanticSegmentation""",
"""Data2VecVisionModel""",
"""Data2VecVisionPreTrainedModel""",
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
"""TFData2VecVisionForImageClassification""",
"""TFData2VecVisionForSemanticSegmentation""",
"""TFData2VecVisionModel""",
"""TFData2VecVisionPreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 80
| 0
|
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
SCREAMING_SNAKE_CASE = object()
# For specifying empty leaf dict `{}`
SCREAMING_SNAKE_CASE = object()
def __lowerCAmelCase( __UpperCAmelCase ,__UpperCAmelCase ):
"""simple docstring"""
_lowercase : Optional[int] = tuple((re.compile(x + '$' ) for x in qs) )
for i in range(len(__UpperCamelCase ) - len(__UpperCamelCase ) + 1 ):
_lowercase : Tuple = [x.match(__UpperCamelCase ) for x, y in zip(__UpperCamelCase ,ks[i:] )]
if matches and all(__UpperCamelCase ):
return True
return False
def __lowerCAmelCase( __UpperCAmelCase ):
"""simple docstring"""
def replace(__UpperCAmelCase ,__UpperCAmelCase ):
for rule, replacement in rules:
if _match(__UpperCamelCase ,__UpperCamelCase ):
return replacement
return val
return replace
def _get_partition_rules():
    """Rules mapping GPT-style transformer parameter names to PartitionSpecs."""
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def set_partitions(in_dict):
    """Assign a PartitionSpec to every flattened key in `in_dict`."""
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
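# Usage sketch (illustrative, not part of this module): given a Flax model's
# parameter FrozenDict, e.g. `params = model.params`,
#     param_specs = set_partitions(unfreeze(params))
# returns a matching tree of PartitionSpecs (None for replicated leaves) that
# can be handed to pjit for model parallelism along the "mp" mesh axis.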
def stooge_sort(arr):
    """Sort `arr` in place with stooge sort and return it."""
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr, i, h):
    """Recursively stooge-sort the slice arr[i..h] (inclusive)."""
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)
        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)
        # Sort the first 2/3 again
        stooge(arr, i, h - t)
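# Stooge sort runs in O(n**(log 3 / log 1.5)) ≈ O(n**2.71) time, slower than
# even bubble sort; it is included for educational purposes only.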
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted))
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
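# Note on the check above: feeding only the new tokens plus `past_key_values`
# must reproduce the logits of a full forward pass over the concatenated
# sequence; this is what validates the decoder's key/value cache.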
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices, 1, inputs_dict["global_attention_mask"],
        )
        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: head masking is not yet implemented
        pass


def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
_snake_case = tmp_path / "cache"
_snake_case = {"text": "string"}
_snake_case = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , split=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
_snake_case = text_path
elif issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
_snake_case = [text_path]
_snake_case = tmp_path / "cache"
_snake_case = {"text": "string"}
_snake_case = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
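# Note: `tmp_path` is pytest's built-in temporary-directory fixture; `text_path`
# is assumed to come from the suite's conftest and to point at a small text file
# with four lines, which is why the helpers assert `num_rows == 4`.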
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
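# The `_test_*` helpers invoked above are provided by the tester mixins
# (`_get_superresolution_dummy_components` by IFPipelineTesterMixin); the thin
# wrappers exist mainly to pin pipeline-specific tolerances.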
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?")
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?")
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.")

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # Temporarily widen the feature extractor to the mel-bin count so
                # log-mel spectrogram targets are padded with the right feature size.
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
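# Usage sketch (checkpoint name is illustrative):
#
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#   batch = processor(text="Hello world", return_tensors="pt")
#
# Exactly one of `audio`/`text` is accepted as the model input, and one of
# `audio_target`/`text_target` as the label source.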
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000, clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy")
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16")

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A black colored car"
        source_prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy")
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A black colored car"
        source_prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, visual_embedding_dim=512, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, bypass_transformer=False, special_visual_initialize=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
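# Usage sketch, mirroring the standard transformers config pattern:
#
#   configuration = VisualBertConfig()        # library-default hyper-parameters
#   # model = VisualBertModel(configuration)  # modeling class lives in modeling_visual_bert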
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowercase = {"""configuration_xglm""": ["""XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XGLMConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ["""XGLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ["""XGLMTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
"""XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XGLMForCausalLM""",
"""XGLMModel""",
"""XGLMPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
"""FlaxXGLMForCausalLM""",
"""FlaxXGLMModel""",
"""FlaxXGLMPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
"""TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXGLMForCausalLM""",
"""TFXGLMModel""",
"""TFXGLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
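# _LazyModule defers the framework-specific imports declared above until an
# attribute is first accessed, keeping `import transformers` fast even when
# torch, TensorFlow and Flax are all installed.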
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the dataset into features and target
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # Load the Iris dataset
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25)
    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap="Blues", normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
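# With normalize="true" each confusion-matrix row is normalized over the true
# class, so the diagonal entries read directly as per-class recall.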
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_inverse_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
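# Example invocation (script and checkpoint names are illustrative; the
# argument names come from InitializationArguments above):
#   python initialize_model.py --config_name gpt2-large \
#       --tokenizer_name codeparrot/codeparrot --model_name codeparrot-model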
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(self, hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act="gelu", layer_norm_eps=0.00001, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)
class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", cross_attention_frequency=2, encoder_hidden_size=1408, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)
class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: Blip2VisionConfig,
        qformer_config: Blip2QFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
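# Usage sketch: composing a BLIP-2 config from its three sub-configs
# (an OPTConfig import is assumed for the language-model half):
#
#   config = Blip2Config.from_vision_qformer_text_configs(
#       Blip2VisionConfig(), Blip2QFormerConfig(), OPTConfig()
#   )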
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def ffmpeg_microphone(sampling_rate: int, chunk_length_s: float, format_for_conversion: str = "f32le"):
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(sampling_rate: int, chunk_length_s: float, stream_chunk_s: Optional[int] = None, stride_length_s: Optional[Union[Tuple[float, float], float]] = None, format_for_conversion: str = "f32le"):
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}")
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
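# Usage sketch for streaming microphone capture (values illustrative):
#
#   for item in ffmpeg_microphone_live(sampling_rate=16000, chunk_length_s=5.0):
#       audio = item["raw"]            # np.float32 samples for this chunk
#       left, right = item["stride"]   # overlap (in samples) shared with neighbors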
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_reduce_labels=False):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])

    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])

    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
def __UpperCAmelCase( self ):
# Initialize image_processing
__A : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , np.ndarray )
# Test not batched input
__A : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__A : List[Any] = image_processing(__UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __UpperCAmelCase( self ):
# Initialize image_processing
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
# Test not batched input
__A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__A : Union[str, Any] = image_processing(__UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        # reduce_labels remaps the background class to 255
        image_processing.do_reduce_labels = True
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
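# Hedged usage sketch (added for illustration; not part of the original test file).
# It shows the preprocessing flow the tests above exercise; treat the exact
# processor arguments as assumptions.
#
#     processor = BeitImageProcessor(do_reduce_labels=True)
#     image, segmentation_map = prepare_semantic_single_inputs()
#     enc = processor(image, segmentation_map, return_tensors="pt")
#     enc["pixel_values"].shape  # (1, 3, crop_height, crop_width)
#     enc["labels"].shape        # (1, crop_height, crop_width), dtype torch.long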
| 387
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
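# Note (added for illustration): with the _LazyModule pattern above, importing the
# package is cheap; the heavy submodule is only imported on first attribute access,
# e.g. a hypothetical downstream use:
#
#     from transformers.models.time_series_transformer import TimeSeriesTransformerConfig
#     config = TimeSeriesTransformerConfig(prediction_length=24)  # triggers the real import here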
| 107
|
import datasets
from .evaluate import evaluate
_CITATION = """\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
"""
_DESCRIPTION = """
This metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""
_KWARGS_DESCRIPTION = """
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the SQuAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
    'exact_match': Exact match (the normalized answer exactly matches the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
>>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
>>> squad_metric = datasets.load_metric(\"squad\")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Squad(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {"id": datasets.Value("string"), "prediction_text": datasets.Value("string")},
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
            reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
        )

    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 244
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
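# Hedged usage sketch (added for illustration; the audio path is hypothetical and
# the call goes through PipelineTool.__call__, which runs encode -> forward -> decode):
#
#     tool = SpeechToTextTool()
#     transcript = tool("path/to/audio.wav")
#     print(transcript)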
| 205
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 205
| 1
|
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)


@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """Remap deprecated `no_*` arguments onto their positive counterparts."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager mode."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)

            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self):
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self):
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self):
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self):
        return self.n_gpu > 0
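# Hedged usage sketch (added for illustration): the fields `models`, `batch_sizes`
# and `sequence_lengths` come from the BenchmarkArguments base class and are
# assumptions here.
#
#     args = TensorFlowBenchmarkArguments(
#         models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128]
#     )
#     with args.strategy.scope():
#         ...  # build and benchmark the model under the selected tf.distribute strategy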
| 271
|
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])
if __name__ == "__main__":
write_movies()
| 271
| 1
|
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2048,
    "AI-Sweden/gpt-sw3-350m": 2048,
    "AI-Sweden/gpt-sw3-1.6b": 2048,
    "AI-Sweden/gpt-sw3-6.7b": 2048,
    "AI-Sweden/gpt-sw3-20b": 2048,
}


class GPTSw3Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs=None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # NOTE: the original set contains distinct Unicode whitespace code points
        # that were normalized during extraction; kept verbatim here.
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        """Removes non-printing characters, normalizes whitespace, and applies NFC normalization."""
        text = self.non_printing_characters_re.sub("", text)

        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])

        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string; no cleanup is performed."""
        return out_string
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings) to a single string, decoding special tokens separately."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "

                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False

        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """Encodes a text or batch of texts directly with SentencePiece, skipping special-token handling."""
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        """Decodes token ids directly with SentencePiece, skipping special-token handling."""
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
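# Hedged usage sketch (added for illustration; requires downloading the checkpoint):
#
#     tok = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
#     ids = tok.encode_fast("Hej, hur mår du?")  # raw SentencePiece ids, no special tokens
#     text = tok.decode_fast(ids)                # round-trips modulo preprocessing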
| 714
|
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sorts the list in place by repeatedly exchanging out-of-order pairs."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
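# Illustrative examples (added; doctest-style, not part of the original):
#
#     >>> exchange_sort([5, 4, 3, 2, 1])
#     [1, 2, 3, 4, 5]
#     >>> exchange_sort([-1, 0, 7])
#     [-1, 0, 7]
#
# The nested loops compare every pair once, so the running time is O(n^2).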
if __name__ == "__main__":
snake_case_ = input('Enter numbers separated by a comma:\n').strip()
snake_case_ = [int(item) for item in user_input.split(',')]
print(exchange_sort(unsorted))
| 68
| 0
|
"""simple docstring"""
def __magic_name__ ( lowercase , lowercase ):
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(lowercase , int(b / 2 ) ) * actual_power(lowercase , int(b / 2 ) )
else:
return a * actual_power(lowercase , int(b / 2 ) ) * actual_power(lowercase , int(b / 2 ) )
def __magic_name__ ( lowercase , lowercase ):
if b < 0:
return 1 / actual_power(lowercase , lowercase )
return actual_power(lowercase , lowercase )
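# Illustrative examples (added; not part of the original):
#
#     >>> power(2, 10)
#     1024
#     >>> power(-2, -3)
#     -0.125
#
# Note: each call recomputes actual_power(a, int(b / 2)) twice, so this version
# performs O(|b|) multiplications; caching the half-power would make it O(log |b|).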
if __name__ == "__main__":
print(power(-2, -3))
| 409
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}


class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
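# Hedged usage sketch (added for illustration; the OnnxConfig constructor signature
# is an assumption taken from the base class):
#
#     config = CamembertConfig(num_hidden_layers=6)            # override any default
#     onnx_config = CamembertOnnxConfig(config, task="default")
#     onnx_config.inputs  # OrderedDict mapping input names to their dynamic axes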
| 587
| 0
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    return image


def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") )
rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") )
rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") )
rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") )
rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") )
rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight") )
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias


def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    config = Blip2Config(vision_config=vision_config, text_config=text_config)
    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    lavis_model_name, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=lavis_model_name, model_type=model_type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
        # the original snippet computed this slice but never checked it; mirror the xl branch
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"blip2-opt-2.7b",
"blip2-opt-6.7b",
"blip2-opt-2.7b-coco",
"blip2-opt-6.7b-coco",
"blip2-flan-t5-xl",
"blip2-flan-t5-xl-coco",
"blip2-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 691
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Creates a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
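# Illustrative example (added; not part of the original): a 10-step cosine schedule.
# Each beta_i equals 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at max_beta,
# so the cumulative product of (1 - beta_i) follows the chosen alpha_bar curve.
#
#     betas = betas_for_alpha_bar(10)
#     alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)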
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t

    @property
    def state_in_first_order(self):
        return self.sample is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol

            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self) -> int:
        return self.config.num_train_timesteps
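# Hedged usage sketch (added for illustration): the standard diffusers sampling loop.
# The `unet` call is schematic; any noise-prediction model with this interface works.
#
#     scheduler = KDPM2DiscreteScheduler()
#     scheduler.set_timesteps(25, device="cpu")
#     sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(sample, t)
#         noise_pred = unet(model_input, t).sample  # hypothetical model
#         sample = scheduler.step(noise_pred, t, sample).prev_sample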
| 691
| 1
|
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
| 500
|
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="""closed""" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="""open""" )
issue.remove_from_labels("""stale""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
issue.add_to_labels("""stale""" )
if __name__ == "__main__":
main()
| 5
| 0
|
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task='fill-mask', model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
    @require_torch
    def test_offline_mode_no_internet(self):
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task='fill-mask', model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = '\nfrom transformers import pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '

        env = self.get_env()
        env['TRANSFORMERS_OFFLINE'] = '1'
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            'You cannot infer task automatically within `pipeline` when using offline mode',
            result.stderr.decode().replace('\n', ''),
        )
    @require_torch
    def test_offline_model_dynamic_model(self):
        load = '\nfrom transformers import AutoModel\n '
        run = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
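# Standard transformers test layout: a model tester builds tiny random
# configs/inputs, and the TestCase below drives it through every MobileBert head.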
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict
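# Tiny-model tester: each `create_and_check_*` method instantiates one MobileBert
# head on a small random config and asserts the output shapes.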
class TFMobileBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        embedding_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.embedding_size = embedding_size

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            embedding_size=self.embedding_size,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
        )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertForMaskedLM(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertForNextSentencePrediction(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertForPreTraining(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(
            result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
        )
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFMobileBertForSequenceClassification(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFMobileBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFMobileBertForTokenClassification(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertForQuestionAnswering(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained('google/mobilebert-uncased')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
'''simple docstring'''
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
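# Helper so that individual modules can check an optional dependency's version
# lazily, at the point of use, rather than at import time.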
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
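# Randomized in-place quicksort (Lomuto-style partition with a random pivot that
# is first swapped to the end of the range) which also returns the number of
# comparisons performed while sorting.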
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution "
    "is :"
)
print(z)
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
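# Standard transformers test layout: LiltModelTester builds tiny random
# configs/inputs (including a legal `bbox` tensor), and the TestCase below
# exercises each LiLT head.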
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
_UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
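# Pipeline flow: Whisper transcribes the input audio into a text prompt, the CLIP
# text encoder embeds it, the UNet denoises latents under classifier-free
# guidance, and the VAE decodes the final latents into images.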
class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
"""simple docstring"""
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
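# Tokenizer test: setUp writes a tiny BPE vocab/merges pair to disk, then the
# test checks that PhobertTokenizer reproduces hand-computed tokens and ids.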
class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
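# The two dummy classes below only exist to expose `forward` signatures to
# `ensure_valid_input`: one with the ONNX inputs contiguous, one with an
# unrelated argument interleaved between them.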
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)
    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")
    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")
    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[float]`): Predicted labels, as returned by a model.\n    references (`List[float]`): Ground truth labels.\n    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n        only the spearmanr score. Defaults to `False`.\nReturns:\n    spearmanr (`float`): Spearman correlation coefficient.\n    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n    Example 1:\n        >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n        >>> print(results)\n        {\'spearmanr\': -0.7}\n\n    Example 2:\n        >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n        ...                                    predictions=[10, 9, 2.5, 6, 4],\n        ...                                    return_pvalue=True)\n        >>> print(results[\'spearmanr\'])\n        -0.7\n        >>> print(round(results[\'spearmanr_pvalue\'], 2))\n        0.19\n'
_CITATION = R'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
import base64


def base85_encode(string: str) -> bytes:
    # encode the input (a str) to bytes, then Base85-encode those bytes
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(b85encoded: bytes) -> str:
    # decode the Base85 input back to bytes, then to a str
    return base64.b85decode(b85encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)
    decoded = base85_decode(encoded)
    print(decoded)
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
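# Standard transformers test layout: TFDPRModelTester builds tiny random
# configs/inputs on a BERT backbone, and the TestCase below checks each DPR
# encoder/reader head.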
class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {'feature-extraction': TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self) -> None:
        """simple docstring"""
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self) -> None:
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_dpr_context_encoder_model(self) -> None:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self) -> None:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self) -> None:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self) -> None:
        """simple docstring"""
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''

    @slow
    def test_inference_no_head(self) -> None:
        """simple docstring"""
        model = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''')
        input_ids = tf.constant(
            [[101, 7_592, 1_010, 2_003, 2_026, 3_899, 10_140, 1_029, 102]])  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
0.03236253,
0.12753335,
0.16818509,
0.00279786,
0.3896933,
0.24264945,
0.2178971,
-0.02335227,
-0.08481959,
-0.14324117,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
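# Retrieval sketch (hedged): DPR ranks passages by the dot product between
# question and context embeddings produced by the two encoder classes tested
# above. Checkpoint names are the public facebook/dpr-* models and
# `q_ids`/`ctx_ids` stand for tokenized inputs; both are illustrative here.
#
#   q_enc = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   ctx_enc = TFDPRContextEncoder.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")
#   score = tf.reduce_sum(q_enc(q_ids).pooler_output * ctx_enc(ctx_ids).pooler_output, axis=-1)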
| 11
|
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model(model, bnb_quantization_config, weights_location=None, device_map=None, no_split_module_classes=None, max_memory=None, offload_folder=None, offload_state_dict=False):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed.")
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed.")

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager.")
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f'''The model device type is {model_device.type}. However, cuda is needed for quantization. '''
            "We move the model to cuda.")
        return model
    elif weights_location is None:
        raise RuntimeError(
            f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''')
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        device_map = get_quantized_model_device_map(
            model, bnb_quantization_config, device_map, max_memory=max_memory, no_split_module_classes=no_split_module_classes, )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True
        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])
        load_checkpoint_in_model(
            model, weights_location, device_map, dtype=bnb_quantization_config.torch_dtype, offload_folder=offload_folder, offload_state_dict=offload_state_dict, keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules, offload_8bit_bnb=load_in_8bit and offload, )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
def get_quantized_model_device_map(model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'.")

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            })
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            })

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model, low_zero=(device_map == "balanced_low_0"), max_memory=max_memory, **kwargs, )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        "\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ")
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit")
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []

    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name)
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug.")
    return model
def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None, ):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features, module.out_features, module.bias is not None, has_fp16_weights=False, threshold=bnb_quantization_config.llm_int8_threshold, )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features, module.out_features, module.bias is not None, bnb_quantization_config.bnb_4bit_compute_dtype, compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant, quant_type=bnb_quantization_config.bnb_4bit_quant_type, )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name)
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
def has_4bit_bnb_layers(model):
    # Check whether the model contains quantized `bnb.nn.Linear4bit` layers
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter):
    return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f'''{module} has no attribute {split}.''')
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB, param_name.replace("weight", "SCB"), offload_folder, index=offload_index, )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
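# Usage sketch (hedged): this module mirrors the public `accelerate.utils` bnb
# helpers; driving it end to end typically looks like the following. The model
# constructor and weights path are illustrative assumptions.
#
#   from accelerate import init_empty_weights
#   from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
#
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#   with init_empty_weights():
#       empty_model = MyModel()  # hypothetical constructor
#   quantized = load_and_quantize_model(
#       empty_model, bnb_quantization_config=bnb_config, weights_location="path/to/weights"
#   )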
| 515
| 0
|
'''simple docstring'''
# flake8: noqa
# Lint as: python3
__all__ = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 56
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xglm'''] = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xglm_fast'''] = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xglm'''] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_xglm'''] = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_xglm'''] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 56
| 1
|
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self) -> None:
        """simple docstring"""
        self.tool = load_tool("""text-classification""")
        self.tool.setup()
        self.remote_tool = load_tool("""text-classification""", remote=True)

    def test_exact_match_arg(self) -> None:
        """simple docstring"""
        result = self.tool("""That's quite cool""", ["""positive""", """negative"""])
        self.assertEqual(result, """positive""")

    def test_exact_match_arg_remote(self) -> None:
        """simple docstring"""
        result = self.remote_tool("""That's quite cool""", ["""positive""", """negative"""])
        self.assertEqual(result, """positive""")

    def test_exact_match_kwarg(self) -> None:
        """simple docstring"""
        result = self.tool(text="""That's quite cool""", labels=["""positive""", """negative"""])
        self.assertEqual(result, """positive""")

    def test_exact_match_kwarg_remote(self) -> None:
        """simple docstring"""
        result = self.remote_tool(text="""That's quite cool""", labels=["""positive""", """negative"""])
        self.assertEqual(result, """positive""")
| 673
|
"""simple docstring"""
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 673
| 1
|
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    '''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''' , ROBERTA_START_DOCSTRING , )
class DeeRobertaModel(DeeBertModel):

    config_class = RobertaConfig
    base_model_prefix = '''roberta'''

    def __init__(self, config):
        """simple docstring"""
        super().__init__(config)
        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    '''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. ''' , ROBERTA_START_DOCSTRING , )
class DeeRobertaForSequenceClassification(BertPreTrainedModel):

    config_class = RobertaConfig
    base_model_prefix = '''roberta'''

    def __init__(self, config):
        """simple docstring"""
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_layer=-1, train_highway=False, ):
        """simple docstring"""
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, )

            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
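# Early-exit intuition (hedged sketch, not part of the model above): a
# DeeBERT-style off-ramp returns its logits once their prediction entropy falls
# below a chosen threshold; the `entropy` helper imported above computes that
# quantity. The threshold value and function name here are illustrative.
#
#   import torch
#
#   def should_exit(logits: torch.Tensor, threshold: float = 0.5) -> bool:
#       probs = torch.softmax(logits, dim=-1)
#       ent = -(probs * torch.log(probs + 1e-12)).sum(dim=-1).mean()
#       return ent.item() < threshold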
| 715
|
"""simple docstring"""
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="""relu""")
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="""relu"""))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation="""relu"""))
classifier.add(layers.Dense(units=1, activation="""sigmoid"""))
# Compiling the CNN
classifier.compile(
optimizer="""adam""", loss="""binary_crossentropy""", metrics=["""accuracy"""]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )

    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        """dataset/training_set""", target_size=(64, 64), batch_size=32, class_mode="""binary"""
    )

    test_set = test_datagen.flow_from_directory(
        """dataset/test_set""", target_size=(64, 64), batch_size=32, class_mode="""binary"""
    )
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save("""cnn.h5""")
# Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        """dataset/single_prediction/image.png""", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = """Normal"""
    if result[0][0] == 1:
        prediction = """Abnormality detected"""
| 556
| 0
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
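# Example invocation (hedged): the script filename below is illustrative, but
# the flags are the ones defined in `main()` at the bottom of this file:
#   accelerate launch tracking_example.py --mixed_precision fp16 --with_tracking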
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""")
    datasets = load_dataset("""glue""", """mrpc""")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""], examples["""sentence2"""], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["""idx""", """sentence1""", """sentence2"""], )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""", """labels""")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="""longest""", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="""pt""", )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """simple docstring"""
    # For testing only
    if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
        config["""num_epochs"""] = 2
    # Initialize Accelerator
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="""all""", project_dir=args.project_dir)
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""])
    seed = int(config["""seed"""])
    batch_size = int(config["""batch_size"""])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("""glue""", """mrpc""")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(""".""")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["""labels"""]))
            metric.add_batch(
                predictions=predictions, references=references, )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'''epoch {epoch}:''', eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    """accuracy""": eval_metric["""accuracy"""],
                    """f1""": eval_metric["""f1"""],
                    """train_loss""": total_loss.item() / len(train_dataloader),
                    """epoch""": epoch,
                }, step=epoch, )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description="""Simple example of training script.""")
    parser.add_argument(
        """--mixed_precision""", type=str, default=None, choices=["""no""", """fp16""", """bf16""", """fp8"""], help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""", )
    parser.add_argument("""--cpu""", action="""store_true""", help="""If passed, will train on the CPU.""")
    parser.add_argument(
        """--with_tracking""", action="""store_true""", help="""Whether to load in all available experiment trackers from the environment and use them for logging.""", )
    parser.add_argument(
        """--project_dir""", type=str, default="""logs""", help="""Location on where to store experiment tracking logs` and relevent project information""", )
    args = parser.parse_args()
    config = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 68
|
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn: bool = True, stacklevel: int = 2):
    '''simple docstring'''
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
                f''' version {__version__} is >= {version_name}''')

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
        elif deprecated_kwargs is None:
            warning = f'''`{attribute}` is deprecated and will be removed in version {version_name}.'''

        if warning is not None:
            warning = warning + ''' ''' if standard_warn else ''''''
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''')

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
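# Usage sketch (hedged): diffusers exposes this helper as `deprecate`. Below, a
# kwarg is deprecated but still honored when passed; the names and version
# string are illustrative, not taken from this file.
#
#   def resize(image, factor=None, **kwargs):
#       scale = deprecate("scale", "1.0.0", "Use `factor` instead.", take_from=kwargs)
#       if scale is not None:
#           factor = scale
#       ...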
| 24
| 0
|
'''simple docstring'''
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin

if is_bs4_available():
from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
    '''simple docstring'''

    def __init__(self, parent) -> None:
        """simple docstring"""
        self.parent = parent

    def prepare_feat_extract_dict(self):
        """simple docstring"""
        return {}


def get_html_strings():
    '''simple docstring'''
    html_string_1 = "<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR=\"FFFFFF\">\n <HR>\n <a href=\"http://google.com\">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style=\"color:#0000FF\">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>"

    html_string_2 = "\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n "

    return [html_string_1, html_string_2]
@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    '''simple docstring'''

    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self) -> None:
        """simple docstring"""
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        """simple docstring"""
        return self.feature_extract_tester.prepare_feat_extract_dict()
    def test_call(self) -> None:
        """simple docstring"""
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)
        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)
        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        # fmt: on

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
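    # Usage sketch (hedged) outside the test harness: the extractor maps raw
    # HTML to parallel lists of text nodes and xpaths, which MarkupLM consumes:
    #
    #   fe = MarkupLMFeatureExtractor()
    #   enc = fe("<html><body><h1>Title</h1></body></html>")
    #   enc.nodes   # [["Title"]]
    #   enc.xpaths  # [["/html/body/h1"]]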
| 41
|
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 41
| 1
|
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            "surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError(
            "surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3))
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides")
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as \
length of a side")
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(F'''Rectangle: {area_rectangle(10, 20) = }''')
print(F'''Square: {area_square(10) = }''')
print(F'''Triangle: {area_triangle(10, 10) = }''')
print(F'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
print(F'''Parallelogram: {area_parallelogram(10, 20) = }''')
print(F'''Rhombus: {area_rhombus(10, 20) = }''')
print(F'''Trapezium: {area_trapezium(10, 20, 30) = }''')
print(F'''Circle: {area_circle(20) = }''')
print(F'''Ellipse: {area_ellipse(10, 20) = }''')
print('\nSurface Areas of various geometric shapes: \n')
print(F'''Cube: {surface_area_cube(20) = }''')
print(F'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
print(F'''Sphere: {surface_area_sphere(20) = }''')
print(F'''Hemisphere: {surface_area_hemisphere(20) = }''')
print(F'''Cone: {surface_area_cone(10, 20) = }''')
print(F'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
print(F'''Cylinder: {surface_area_cylinder(10, 20) = }''')
print(F'''Torus: {surface_area_torus(20, 10) = }''')
print(F'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
print(F'''Square: {area_reg_polygon(4, 10) = }''')
print(F'''Reqular Pentagon: {area_reg_polygon(5, 10) = }''')
| 25
|
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 477
| 0
|
from collections import namedtuple
lowerCAmelCase__ : Optional[Any] = namedtuple('''from_to''', '''from_ to''')
lowerCAmelCase__ : Optional[Any] = {
'''cubicmeter''': from_to(1, 1),
'''litre''': from_to(0.0_01, 10_00),
'''kilolitre''': from_to(1, 1),
'''gallon''': from_to(0.0_04_54, 2_64.1_72),
'''cubicyard''': from_to(0.7_64_55, 1.3_07_95),
'''cubicfoot''': from_to(0.0_28, 35.31_47),
'''cup''': from_to(0.0_00_23_65_88, 42_26.75),
}
def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            F"""Invalid 'from_type' value: {from_type!r} Supported values are:\n"""
            + ', '.join(METRIC_CONVERSION))
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            F"""Invalid 'to_type' value: {to_type!r}. Supported values are:\n"""
            + ', '.join(METRIC_CONVERSION))
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
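# Worked example (hedged): 5 litres expressed in gallons via the table above:
# 5 L -> 5 * 0.001 = 0.005 m^3, and 0.005 * 264.172 = 1.32086 gallons, i.e.
#   volume_conversion(5, "litre", "gallon")  # ~1.32086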
| 717
|
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel

api = HfApi()
results = {}  # the per-model keys for the expected slices below were lost in this copy and are left unset
# fmt: off
lowerCAmelCase__ : int = torch.tensor([
-0.75_15, -1.68_83, 0.24_20, 0.03_00, 0.63_47, 1.34_33, -1.17_43, -3.74_67,
1.23_42, -2.24_85, 0.46_36, 0.80_76, -0.79_91, 0.39_69, 0.84_98, 0.91_89,
-1.88_87, -3.35_22, 0.76_39, 0.20_40, 0.62_71, -2.71_48, -1.63_16, 3.08_39,
0.31_86, 0.27_21, -0.97_59, -1.24_61, 2.62_57, 1.35_57
])
lowerCAmelCase__ : Dict = torch.tensor([
-2.36_39, -2.53_44, 0.00_54, -0.66_74, 1.59_90, 1.01_58, 0.31_24, -2.14_36,
1.87_95, -2.54_29, -0.15_66, -0.39_73, 1.24_90, 2.64_47, 1.22_83, -0.52_08,
-2.81_54, -3.51_19, 2.38_38, 1.20_33, 1.72_01, -2.12_56, -1.45_76, 2.79_48,
2.42_04, -0.97_52, -1.25_46, 0.80_27, 3.27_58, 3.13_65
])
lowerCAmelCase__ : Dict = torch.tensor([
-0.65_31, -0.68_91, -0.31_72, -0.53_75, -0.91_40, -0.53_67, -0.11_75, -0.78_69,
-0.38_08, -0.45_13, -0.20_98, -0.00_83, 0.31_83, 0.51_40, 0.22_47, -0.13_04,
-0.13_02, -0.28_02, -0.20_84, -0.20_25, -0.49_67, -0.48_73, -0.08_61, 0.69_25,
0.02_50, 0.12_90, -0.15_43, 0.63_16, 1.04_60, 1.49_43
])
lowerCAmelCase__ : List[str] = torch.tensor([
0.09_11, 0.11_07, 0.01_82, 0.04_35, -0.08_05, -0.06_08, 0.03_81, 0.21_72,
-0.02_80, 0.13_27, -0.02_99, -0.02_55, -0.00_50, -0.11_70, -0.10_46, 0.03_09,
0.13_67, 0.17_28, -0.05_33, -0.07_48, -0.05_34, 0.16_24, 0.03_84, -0.18_05,
-0.07_07, 0.06_42, 0.02_20, -0.01_34, -0.13_33, -0.15_05
])
lowerCAmelCase__ : Union[str, Any] = torch.tensor([
0.13_21, 0.13_37, 0.04_40, 0.06_22, -0.05_91, -0.03_70, 0.05_03, 0.21_33,
-0.01_77, 0.14_15, -0.01_16, -0.01_12, 0.00_44, -0.09_80, -0.07_89, 0.03_95,
0.15_02, 0.17_85, -0.04_88, -0.05_14, -0.04_04, 0.15_39, 0.04_54, -0.15_59,
-0.06_65, 0.06_59, 0.03_83, -0.00_05, -0.12_66, -0.13_86
])
lowerCAmelCase__ : List[Any] = torch.tensor([
0.11_54, 0.12_18, 0.03_07, 0.05_26, -0.07_11, -0.05_41, 0.03_66, 0.20_78,
-0.02_67, 0.13_17, -0.02_26, -0.01_93, -0.00_14, -0.10_55, -0.09_02, 0.03_30,
0.13_91, 0.17_09, -0.05_62, -0.06_93, -0.05_60, 0.14_82, 0.03_81, -0.16_83,
-0.06_81, 0.06_61, 0.03_31, -0.00_46, -0.12_68, -0.14_31
])
lowerCAmelCase__ : Optional[Any] = torch.tensor([
0.11_92, 0.12_40, 0.04_14, 0.06_06, -0.05_57, -0.04_12, 0.04_30, 0.20_42,
-0.02_00, 0.13_85, -0.01_15, -0.01_32, 0.00_17, -0.09_65, -0.08_02, 0.03_98,
0.14_33, 0.17_47, -0.04_58, -0.05_33, -0.04_07, 0.15_45, 0.04_19, -0.15_74,
-0.06_45, 0.06_26, 0.03_41, -0.00_10, -0.11_99, -0.13_90
])
lowerCAmelCase__ : List[str] = torch.tensor([
0.10_75, 0.10_74, 0.02_05, 0.04_31, -0.07_74, -0.06_07, 0.02_98, 0.20_42,
-0.03_20, 0.12_67, -0.02_81, -0.02_50, -0.00_64, -0.10_91, -0.09_46, 0.02_90,
0.13_28, 0.16_50, -0.05_80, -0.07_38, -0.05_86, 0.14_40, 0.03_37, -0.17_46,
-0.07_12, 0.06_05, 0.02_50, -0.00_99, -0.13_16, -0.14_73
])
lowerCAmelCase__ : List[str] = torch.tensor([
-1.45_72, -2.04_81, -0.04_14, -0.60_05, 1.41_36, 0.58_48, 0.40_28, -2.73_30,
1.22_12, -2.12_28, 0.21_55, 0.40_39, 0.76_62, 2.05_35, 0.74_77, -0.32_43,
-2.17_58, -2.76_48, 1.69_47, 0.70_26, 1.23_38, -1.60_78, -0.86_82, 2.28_10,
1.85_74, -0.57_18, -0.55_86, -0.01_86, 2.34_15, 2.12_51])
lowerCAmelCase__ : List[Any] = torch.tensor([
-1.36_90, -1.97_20, -0.40_90, -0.69_66, 1.46_60, 0.99_38, -0.13_85, -2.73_24,
0.77_36, -1.89_17, 0.29_23, 0.42_93, 0.16_93, 1.41_12, 1.18_87, -0.31_81,
-2.21_60, -2.63_81, 1.31_70, 0.81_63, 0.92_40, -1.65_44, -0.60_99, 2.52_59,
1.64_30, -0.90_90, -0.93_92, -0.01_26, 2.42_68, 2.32_66
])
lowerCAmelCase__ : Tuple = torch.tensor([
-1.35_25, -1.96_28, -0.39_56, -0.68_60, 1.46_64, 1.00_14, -0.12_59, -2.72_12,
0.77_72, -1.88_11, 0.29_96, 0.43_88, 0.17_04, 1.40_29, 1.17_01, -0.30_27,
-2.20_53, -2.62_87, 1.33_50, 0.81_31, 0.92_74, -1.62_92, -0.60_98, 2.51_31,
1.65_05, -0.89_58, -0.92_98, -0.01_51, 2.42_57, 2.33_55
])
lowerCAmelCase__ : List[str] = torch.tensor([
-2.05_85, -2.78_97, -0.28_50, -0.89_40, 1.90_52, 0.57_02, 0.63_45, -3.89_59,
1.59_32, -3.23_19, 0.19_74, 0.02_87, 1.75_66, 2.65_43, 0.83_87, -0.53_51,
-3.27_36, -4.33_75, 2.90_29, 1.63_90, 1.46_40, -2.17_01, -1.90_13, 2.93_41,
3.49_81, -0.62_55, -1.16_44, -0.15_91, 3.70_97, 3.20_66
])
lowerCAmelCase__ : Dict = torch.tensor([
-2.31_39, -2.55_94, -0.01_97, -0.67_85, 1.70_01, 1.16_06, 0.30_75, -2.17_40,
1.80_71, -2.56_30, -0.09_26, -0.38_11, 1.21_16, 2.62_46, 1.27_31, -0.53_98,
-2.81_53, -3.61_40, 2.38_93, 1.32_62, 1.62_58, -2.18_56, -1.32_67, 2.83_95,
2.37_79, -1.06_23, -1.24_68, 0.89_59, 3.33_67, 3.22_43
])
lowerCAmelCase__ : Dict = torch.tensor([
-2.06_28, -2.76_67, -0.20_89, -0.82_63, 2.05_39, 0.59_92, 0.64_95, -3.83_36,
1.60_25, -3.28_17, 0.17_21, -0.06_33, 1.75_16, 2.70_39, 0.81_00, -0.59_08,
-3.21_13, -4.43_43, 2.92_57, 1.36_32, 1.55_62, -2.14_89, -1.98_94, 3.05_60,
3.33_96, -0.73_28, -1.04_17, 0.03_83, 3.70_93, 3.23_43
])
lowerCAmelCase__ : Any = torch.tensor([
-1.45_74, -2.05_69, -0.04_73, -0.61_17, 1.40_18, 0.57_69, 0.41_29, -2.73_44,
1.22_41, -2.13_97, 0.20_00, 0.39_37, 0.76_16, 2.04_53, 0.73_24, -0.33_91,
-2.17_46, -2.77_44, 1.69_63, 0.69_21, 1.21_87, -1.61_72, -0.88_77, 2.24_39,
1.84_71, -0.58_39, -0.56_05, -0.04_64, 2.32_50, 2.12_19
])
# fmt: on
models = api.list_models(filter='''diffusers''')
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]

        print(F'''Started running {mod.modelId}!!!''')

        if mod.modelId.startswith('''CompVis'''):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder='''unet''')
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)

        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample

        assert torch.allclose(
            logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
        )
        print(F'''{mod.modelId} has passed successfully!!!''')
| 699
| 0
|
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)


@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """Remap deprecated `no_*` flags to their positive counterparts before delegating to the parent init."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager mode."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_mode` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self):
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
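A minimal usage sketch for the class above (kept as comments; model names and sizes are illustrative, and TensorFlow must be installed):

# args = TensorFlowBenchmarkArguments(
#     models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[32], eager_mode=True
# )
# with args.strategy.scope():
#     ...  # build and call the model to benchmark under the selected device strategy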
| 662
|
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient (v* A v) / (v* v) for a Hermitian matrix A and a vector v."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
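As an illustrative check (values chosen here, not part of the original tests): for an eigenvector of a Hermitian matrix, the Rayleigh quotient recovers the matching eigenvalue.

# M = np.array([[2, 0], [0, 3]])
# e = np.array([[0], [1]])  # eigenvector of M for eigenvalue 3
# print(rayleigh_quotient(M, e))  # [[3.]]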
| 662
| 1
|
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1

        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)

            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem (deltas in MiB)
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem (deltas in MiB)
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
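A minimal usage sketch for the helpers above (kept as comments; the profiled workload is hypothetical, and the GPU loop is a no-op without CUDA):

# start = start_measure()
# training_step()  # any workload to profile
# measures = end_measure(start)
# log_measures(measures, "training step")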
| 604
|
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """Wraps tokenized sequences and their lengths for LM distillation."""

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Sanity check: lengths must match the stored token id sequences."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Splits sequences longer than the model's max input size into valid chunks."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Drops sequences of 11 tokens or fewer."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Drops sequences where more than 50% of tokens are unknown."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')

        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Pads a batch of (token_ids, length) pairs into dense tensors."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
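A minimal sketch of how this dataset is typically consumed (kept as comments; assumes `params` and `data` follow the shapes expected above):

# dataset = LmSeqsDataset(params, data)
# loader = torch.utils.data.DataLoader(dataset, batch_size=32, collate_fn=dataset.batch_sequences)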
| 604
| 1
|
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class CustomLocalPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin the loop
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,), "This is a local test"

        return ImagePipelineOutput(images=image), "This is a local test"
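A minimal usage sketch (kept as comments; the `unet` and `scheduler` instances are assumed to exist, and any UNet2DModel plus a compatible scheduler works):

# pipe = CustomLocalPipeline(unet=unet, scheduler=scheduler)
# images, message = pipe(batch_size=1, num_inference_steps=2, output_type="numpy", return_dict=False)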
| 216
|
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }


@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
        src_text = [
' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
]
        tgt_text = [
'California\'s largest electricity provider has turned off power to hundreds of thousands of customers.',
'Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.',
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
| 247
| 0
|
"""simple docstring"""
def lowercase ( ) -> int:
return 1
def lowercase ( __UpperCamelCase ) -> int:
return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def lowercase ( __UpperCamelCase ) -> int:
return 0 if x < 0 else five_pence(x - 5 ) + two_pence(__UpperCamelCase )
def lowercase ( __UpperCamelCase ) -> int:
return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(__UpperCamelCase )
def lowercase ( __UpperCamelCase ) -> int:
return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(__UpperCamelCase )
def lowercase ( __UpperCamelCase ) -> int:
return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(__UpperCamelCase )
def lowercase ( __UpperCamelCase ) -> int:
return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(__UpperCamelCase )
def lowercase ( __UpperCamelCase ) -> int:
return 0 if x < 0 else two_pound(x - 200 ) + one_pound(__UpperCamelCase )
def lowercase ( __UpperCamelCase = 200 ) -> int:
return two_pound(__UpperCamelCase )
if __name__ == "__main__":
print(solution(int(input().strip())))
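The mutually recursive helpers above recompute the same subproblems many times; an equivalent bottom-up sketch (added here for illustration, not part of the original solution) counts the same combinations in O(len(coins) * total) time:

def solution_dp(total: int = 200) -> int:
    coins = (1, 2, 5, 10, 20, 50, 100, 200)
    ways = [1] + [0] * total  # ways[0] = 1: one way to make zero pence
    for coin in coins:
        for amount in range(coin, total + 1):
            ways[amount] += ways[amount - coin]
    return ways[total]  # solution_dp(200) == solution(200) == 73682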
| 705
|
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class _lowercase :
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None ):
__magic_name__ = start
__magic_name__ = end
__magic_name__ = val
__magic_name__ = (start + end) // 2
__magic_name__ = left
__magic_name__ = right
def __repr__( self ):
return f'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'''
class _lowercase :
def __init__( self , UpperCamelCase_ , UpperCamelCase_ ):
__magic_name__ = collection
__magic_name__ = function
if self.collection:
__magic_name__ = self._build_tree(0 , len(UpperCamelCase_ ) - 1 )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ ):
self._update_tree(self.root , UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ ):
return self._query_range(self.root , UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ ):
if start == end:
return SegmentTreeNode(UpperCamelCase_ , UpperCamelCase_ , self.collection[start] )
__magic_name__ = (start + end) // 2
__magic_name__ = self._build_tree(UpperCamelCase_ , UpperCamelCase_ )
__magic_name__ = self._build_tree(mid + 1 , UpperCamelCase_ )
return SegmentTreeNode(UpperCamelCase_ , UpperCamelCase_ , self.fn(left.val , right.val ) , UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
if node.start == i and node.end == i:
__magic_name__ = val
return
if i <= node.mid:
self._update_tree(node.left , UpperCamelCase_ , UpperCamelCase_ )
else:
self._update_tree(node.right , UpperCamelCase_ , UpperCamelCase_ )
__magic_name__ = self.fn(node.left.val , node.right.val )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , UpperCamelCase_ , UpperCamelCase_ )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , UpperCamelCase_ , node.mid ) , self._query_range(node.right , node.mid + 1 , UpperCamelCase_ ) , )
else:
# range in right child tree
return self._query_range(node.right , UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
if self.root is not None:
__magic_name__ = Queue()
queue.put(self.root )
while not queue.empty():
__magic_name__ = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print("*" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 190
| 0
|
""" ViT MAE model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
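A minimal instantiation sketch (kept as comments; per the upstream docs, the defaults correspond to the facebook/vit-mae-base architecture):

# from transformers import ViTMAEModel
# config = ViTMAEConfig(mask_ratio=0.75)
# model = ViTMAEModel(config)  # randomly initialised weights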
| 98
|
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
| 58
| 0
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which would make the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 704
|
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})

            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)

            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)

            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")

        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `input_ids = None` in `preprocess` when there is no prompt; in batched mode these get
        # grouped into a list of `None`, which would fail in `generate`, so normalise that back to `None` here.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
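A minimal usage sketch (kept as comments; the checkpoint and image URL are illustrative):

# from transformers import pipeline
# captioner = pipeline("image-to-text")  # optionally pass model=..., e.g. a GIT, BLIP or pix2struct checkpoint
# captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")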
| 325
| 0
|
"""Conversions between common molecular quantities, based on the ideal gas law
PV = nRT with R ~ 0.0821 L*atm/(mol*K)."""


def molarity_to_normality(nfactor: float, moles: float, volume: float) -> float:
    """Normality = molarity (moles / volume) times the n-factor of the solute."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Pressure (atm) from PV = nRT, given volume (L), moles and temperature (K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Volume (L) from PV = nRT, given pressure (atm), moles and temperature (K)."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, volume: float, moles: float) -> float:
    """Temperature (K) from PV = nRT, given pressure (atm), volume (L) and moles."""
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
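A worked example of the ideal-gas relation used above:

# P = nRT / V: 0.82 mol at 300 K in 8.2 L gives round((0.82 * 0.0821 * 300) / 8.2) = 2 atm
# moles_to_pressure(volume=8.2, moles=0.82, temperature=300)  # 2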
| 517
|
""" ALBERT model configuration"""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig


ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}


class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
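A quick configuration sketch (kept as comments; per the upstream docs, the defaults mirror the albert-xxlarge architecture):

# config = AlbertConfig()  # xxlarge-sized defaults
# small = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)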
| 517
| 1
|
"""simple docstring"""
# Imports
import numpy as np
class a__ :
def __init__( self : Optional[int] , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : List[str]=None , UpperCamelCase_ : str=None , UpperCamelCase_ : List[str]=None , UpperCamelCase_ : List[Any]=None):
"""simple docstring"""
self.set_matricies(red=UpperCamelCase_ , green=UpperCamelCase_ , blue=UpperCamelCase_ , red_edge=UpperCamelCase_ , nir=UpperCamelCase_)
def a_ ( self : int , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Optional[Any]=None):
"""simple docstring"""
if red is not None:
__UpperCAmelCase : List[Any] = red
if green is not None:
__UpperCAmelCase : List[Any] = green
if blue is not None:
__UpperCAmelCase : Union[str, Any] = blue
if red_edge is not None:
__UpperCAmelCase : Dict = red_edge
if nir is not None:
__UpperCAmelCase : Optional[int] = nir
return True
def a_ ( self : List[str] , UpperCamelCase_ : int="" , UpperCamelCase_ : str=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : List[str]=None , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : List[Any]=None):
"""simple docstring"""
self.set_matricies(red=UpperCamelCase_ , green=UpperCamelCase_ , blue=UpperCamelCase_ , red_edge=UpperCamelCase_ , nir=UpperCamelCase_)
__UpperCAmelCase : str = {
"ARVI2": self.arvaa,
"CCCI": self.ccci,
"CVI": self.cvi,
"GLI": self.gli,
"NDVI": self.ndvi,
"BNDVI": self.bndvi,
"redEdgeNDVI": self.red_edge_ndvi,
"GNDVI": self.gndvi,
"GBNDVI": self.gbndvi,
"GRNDVI": self.grndvi,
"RBNDVI": self.rbndvi,
"PNDVI": self.pndvi,
"ATSAVI": self.atsavi,
"BWDRVI": self.bwdrvi,
"CIgreen": self.ci_green,
"CIrededge": self.ci_rededge,
"CI": self.ci,
"CTVI": self.ctvi,
"GDVI": self.gdvi,
"EVI": self.evi,
"GEMI": self.gemi,
"GOSAVI": self.gosavi,
"GSAVI": self.gsavi,
"Hue": self.hue,
"IVI": self.ivi,
"IPVI": self.ipvi,
"I": self.i,
"RVI": self.rvi,
"MRVI": self.mrvi,
"MSAVI": self.m_savi,
"NormG": self.norm_g,
"NormNIR": self.norm_nir,
"NormR": self.norm_r,
"NGRDI": self.ngrdi,
"RI": self.ri,
"S": self.s,
"IF": self._if,
"DVI": self.dvi,
"TVI": self.tvi,
"NDRE": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("Index not in the list!")
return False
def a_ ( self : List[str]):
"""simple docstring"""
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def a_ ( self : Any):
"""simple docstring"""
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def a_ ( self : List[Any]):
"""simple docstring"""
return self.nir * (self.red / (self.green**2))
def a_ ( self : int):
"""simple docstring"""
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def a_ ( self : Dict):
"""simple docstring"""
return (self.nir - self.red) / (self.nir + self.red)
def a_ ( self : Any):
"""simple docstring"""
return (self.nir - self.blue) / (self.nir + self.blue)
def a_ ( self : int):
"""simple docstring"""
return (self.redEdge - self.red) / (self.redEdge + self.red)
def a_ ( self : Optional[Any]):
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green)
def a_ ( self : List[str]):
"""simple docstring"""
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def a_ ( self : Any):
"""simple docstring"""
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def a_ ( self : List[Any]):
"""simple docstring"""
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def a_ ( self : List[str]):
"""simple docstring"""
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def a_ ( self : Any , UpperCamelCase_ : List[str]=0.08 , UpperCamelCase_ : List[str]=1.22 , UpperCamelCase_ : Tuple=0.03):
"""simple docstring"""
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def a_ ( self : Optional[Any]):
"""simple docstring"""
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def a_ ( self : Any):
"""simple docstring"""
return (self.nir / self.green) - 1
def a_ ( self : str):
"""simple docstring"""
return (self.nir / self.redEdge) - 1
def a_ ( self : Tuple):
"""simple docstring"""
return (self.red - self.blue) / self.red
def a_ ( self : str):
"""simple docstring"""
__UpperCAmelCase : Dict = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))
def a_ ( self : Dict):
"""simple docstring"""
return self.nir - self.green
def a_ ( self : int):
"""simple docstring"""
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def a_ ( self : List[Any]):
"""simple docstring"""
__UpperCAmelCase : List[str] = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
def a_ ( self : Dict , UpperCamelCase_ : List[str]=0.16):
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green + y)
def a_ ( self : Optional[int] , UpperCamelCase_ : Tuple=0.5):
"""simple docstring"""
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def a_ ( self : Optional[Any]):
"""simple docstring"""
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue))
def a_ ( self : Any , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : int=None):
"""simple docstring"""
return (self.nir - b) / (a * self.red)
def a_ ( self : int):
"""simple docstring"""
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def a_ ( self : Tuple):
"""simple docstring"""
return (self.red + self.green + self.blue) / 30.5
def a_ ( self : List[Any]):
"""simple docstring"""
return self.nir / self.red
def a_ ( self : Tuple):
"""simple docstring"""
return (self.rvi() - 1) / (self.rvi() + 1)
def a_ ( self : str):
"""simple docstring"""
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def a_ ( self : Dict):
"""simple docstring"""
return self.green / (self.nir + self.red + self.green)
def a_ ( self : Optional[Any]):
"""simple docstring"""
return self.nir / (self.nir + self.red + self.green)
def a_ ( self : List[Any]):
"""simple docstring"""
return self.red / (self.nir + self.red + self.green)
def a_ ( self : Tuple):
"""simple docstring"""
return (self.green - self.red) / (self.green + self.red)
def a_ ( self : Optional[Any]):
"""simple docstring"""
return (self.red - self.green) / (self.red + self.green)
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : Any = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
__UpperCAmelCase : Dict = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
return (max_value - min_value) / max_value
def a_ ( self : Union[str, Any]):
"""simple docstring"""
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def a_ ( self : Tuple):
"""simple docstring"""
return self.nir / self.red
def a_ ( self : Any):
"""simple docstring"""
return (self.ndvi() + 0.5) ** (1 / 2)
def a_ ( self : Dict):
"""simple docstring"""
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
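A minimal usage sketch with random band matrices (kept as comments; shapes are illustrative, and any equally shaped ndarrays work):

# rng = np.random.default_rng(0)
# red, nir = rng.random((4, 4)), rng.random((4, 4))
# cl = IndexCalculation(red=red, nir=nir)
# print(cl.calculation("NDVI"))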
| 487
|
"""simple docstring"""
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase = False ) -> bool:
"""simple docstring"""
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3170_4406_4679_8873_8596_1981 and not allow_probable:
raise ValueError(
"Warning: upper bound of deterministic test is exceeded. "
"Pass allow_probable=True to allow probabilistic test. "
"A return value of True indicates a probable prime." )
# array bounds provided by analysis
__UpperCAmelCase : List[str] = [
2047,
137_3653,
2532_6001,
32_1503_1751,
2_1523_0289_8747,
3_4747_4966_0383,
341_5500_7172_8321,
1,
382_5123_0565_4641_3051,
1,
1,
3186_6585_7834_0311_5116_7461,
3_3170_4406_4679_8873_8596_1981,
]
__UpperCAmelCase : Tuple = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(UpperCamelCase , 1 ):
if n < _p:
# then we have our last prime to check
__UpperCAmelCase : Tuple = primes[:idx]
break
__UpperCAmelCase , __UpperCAmelCase : List[str] = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
__UpperCAmelCase : Optional[int] = False
for r in range(UpperCamelCase ):
__UpperCAmelCase : Dict = pow(UpperCamelCase , d * 2**r , UpperCamelCase )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
__UpperCAmelCase : Optional[int] = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def _UpperCamelCase ( ) -> None:
"""simple docstring"""
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(83_8201 )
assert miller_rabin(83_8207 )
# 1_373_653
assert not miller_rabin(1731_6001 )
assert miller_rabin(1731_6017 )
# 25_326_001
assert not miller_rabin(30_7838_6641 )
assert miller_rabin(30_7838_6653 )
# 3_215_031_751
assert not miller_rabin(1_7130_4557_4801 )
assert miller_rabin(1_7130_4557_4819 )
# 2_152_302_898_747
assert not miller_rabin(2_7797_9972_8307 )
assert miller_rabin(2_7797_9972_8327 )
# 3_474_749_660_383
assert not miller_rabin(113_8500_2390_9441 )
assert miller_rabin(113_8500_2390_9527 )
# 341_550_071_728_321
assert not miller_rabin(127_5041_0188_4880_4351 )
assert miller_rabin(127_5041_0188_4880_4391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(796_6646_4458_5077_8779_1867 )
assert miller_rabin(796_6646_4458_5077_8779_1951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5528_4067_7446_6478_9766_0333 )
assert miller_rabin(5528_4067_7446_6478_9766_0359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 487
| 1
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 194
|
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
a_ : Any = logging.get_logger(__name__)
a_ : str = TypeVar('DatasetType', Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to concatenate with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
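# Minimal usage sketch (illustrative only; toy data, assumes the `datasets`
# package is installed and the default alternating interleave strategy):
# >>> from datasets import Dataset
# >>> d1 = Dataset.from_dict({"a": [0, 1, 2]})
# >>> d2 = Dataset.from_dict({"a": [10, 11, 12]})
# >>> interleave_datasets([d1, d2])["a"]
# [0, 10, 1, 11, 2, 12]
# >>> concatenate_datasets([d1, d2])["a"]
# [0, 1, 2, 10, 11, 12]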
| 194
| 1
|
def solution(min_total: int = 10**12) -> int:
    # Step a Pell-type recurrence until the numerator first implies a total
    # above `min_total`, then recover the answer from the denominator
    # (the standard closed-form walk for Project Euler problem 100).
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1

    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator

        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator

    return (denominator + 1) // 2


if __name__ == "__main__":
    print(f"{solution() = }")
| 702
|
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode() -> Dict[int, str]:
    """Map every byte to a printable unicode character so BPE can operate on text."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word: Tuple[str, ...]) -> set:
    """Return the set of adjacent symbol pairs in a word."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
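# Usage sketch (illustrative; requires the `transformers` package and cached
# access to the checkpoint, and the exact ids depend on the LED vocabulary):
# >>> tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
# >>> tokenizer("Hello world")["input_ids"]  # wrapped in <s> ... </s>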
| 189
| 0
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
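# Usage sketch (illustrative; requires `transformers`). With the default conv
# strides above, the product is 5 * 2**6:
# >>> config = SEWDConfig()
# >>> config.inputs_to_logits_ratio
# 320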
| 5
|
# fmt: off
MORSE_CODE_DICT = {
    "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
    "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
    "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
    "V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
    "2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
    "8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
    ":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", '"': ".-..-.",
    "?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
    "(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/",
}  # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    """Translate a plaintext message into Morse code."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Translate a Morse-code message back into plaintext."""
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    encrypted = encrypt(message)
    print(encrypted)
    decrypted = decrypt(encrypted)
    print(decrypted)


if __name__ == "__main__":
    main()
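# Example round trip (illustrative, derived from the table above):
# >>> encrypt("SOS")
# '... --- ...'
# >>> decrypt("... --- ...")
# 'SOS'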
| 385
| 0
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
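# Launch sketch (the script file name below is hypothetical; `accelerate launch`
# is the standard entry point shipped with the accelerate CLI):
#   accelerate launch tracking_example.py --with_tracking --project_dir logs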
| 706
|
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(limit: int = 1_000_000) -> int:
    # Sum every number below `limit` that is palindromic in base 10 and in
    # base 2 (Project Euler, problem 36).
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total


if __name__ == "__main__":
    print(solution(int(input().strip())))
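# Known result for the default limit (illustrative check):
# >>> solution()
# 872187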
| 356
| 0
|
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """The variance-exploding (VE) stochastic differential equation scheduler."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(
        self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None
    ):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )
    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)
    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
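# Rough driving-loop sketch (illustrative only; `score_model` stands in for a
# hypothetical score network evaluated at the current noise level, and the
# real ScoreSdeVePipeline adds corrector steps and sigma handling):
# scheduler = ScoreSdeVeScheduler()
# scheduler.set_timesteps(num_inference_steps=100)
# scheduler.set_sigmas(num_inference_steps=100)
# for t in scheduler.timesteps:
#     sample = scheduler.step_pred(score_model(sample, t), t, sample).prev_sample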
| 404
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """Stack backed by a singly linked list."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
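# Example (illustrative):
# >>> stack = LinkedStack[int]()
# >>> stack.push(1)
# >>> stack.push(2)
# >>> stack.pop()
# 2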
| 83
| 0
|
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """
    Try every Caesar shift, score each candidate plaintext against English
    letter frequencies with the chi-squared statistic, and return the best.
    """
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict
    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(alphabet_letters)
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the number of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected number of times the letter should appear
                    # based on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the number of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected number of times the letter should appear
                    # based on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher: int = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
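# Example (illustrative; the exact chi-squared score depends on the frequency
# table above, so only shift and plaintext are shown):
# >>> decrypt_caesar_with_chi_squared("dof pz aol jhlzhy jpwoly zv wvwbshy?")
# (7, ..., 'why is the caesar cipher so popular?')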
| 705
|
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculate the date of Easter for a given year using Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )
if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
| 172
| 0
|
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Sum the currently allocated resources, per resource column."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self):
        """Resources still free: claim vector minus what is allocated."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        """Per-process need: maximum claim minus current allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map each process's original index to its need vector."""
        return {self.__need().index(i): i for i in self.__need()}
    def main(self, **kwargs) -> None:
        """Run the safety algorithm, executing processes whose need can be met."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break
    def __pretty_data(self):
        """Print the allocation and claim tables plus resource summaries."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
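# Example run using the module-level test tables above (illustrative; any
# truthy keyword makes main() print the tables first):
# BankersAlgorithm(
#     test_claim_vector, test_allocated_res_table, test_maximum_claim_table
# ).main(describe=True)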
| 520
|
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    """Sort a list of integers in place with pigeonhole sort."""
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))
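# Example (illustrative):
# >>> pigeon_sort([0, 5, 3, 2, 2])
# [0, 2, 2, 3, 5]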
| 605
| 0
|
"""simple docstring"""
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket")
@patch("builtins.open")
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)

    # ===== assertions =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()

    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()

    conn.send.assert_called_once()
    conn.close.assert_called_once()

    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
| 492
|
"""simple docstring"""
def solution(n: int = 1000) -> int:
    # Count the first `n` expansions of the continued fraction for sqrt(2)
    # whose numerator has more digits than the denominator
    # (Project Euler, problem 57).
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)


if __name__ == "__main__":
    print(f"{solution() = }")
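# Known result for the default 1000 expansions (illustrative check):
# >>> solution()
# 153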
| 492
| 1
|
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """
    Given a sorted list, return the indices of the two numbers that add up to
    `target`, using the classic two-pointer scan from both ends.
    """
    i = 0
    j = len(nums) - 1

    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1

    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{two_pointer([2, 7, 11, 15], 9) = }")
| 9
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 645
| 0
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator: Accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )

    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return
# Now we train the model
_lowercase ={}
for epoch in range(_snake_case , _snake_case):
model.train()
for step, batch in enumerate(_snake_case):
_lowercase =model(**_snake_case)
_lowercase =outputs.loss
_lowercase =loss / gradient_accumulation_steps
accelerator.backward(_snake_case)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
_lowercase =f'''epoch_{epoch}'''
_lowercase =os.path.join(args.output_dir , _snake_case)
accelerator.save_state(_snake_case)
_lowercase =evaluation_loop(_snake_case , _snake_case , _snake_case , _snake_case)
_lowercase =accuracy
_lowercase =lr_scheduler.get_lr()[0]
_lowercase =optimizer.param_groups[0]['lr']
_lowercase =epoch
_lowercase =overall_step
accelerator.print(f'''epoch {epoch}:''' , _snake_case)
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'''state_{epoch}.json''') , 'w') as f:
json.dump(_snake_case , _snake_case)
def _snake_case () -> List[Any]:
_lowercase =argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.')
parser.add_argument(
'--model_name_or_path' , type=_snake_case , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_snake_case , )
parser.add_argument(
'--output_dir' , type=_snake_case , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--resume_from_checkpoint' , type=_snake_case , default=_snake_case , help='If the training should continue from a checkpoint folder.' , )
parser.add_argument(
'--partial_train_epoch' , type=_snake_case , default=_snake_case , help='If passed, the training will stop after this number of epochs.' , )
parser.add_argument(
'--num_epochs' , type=_snake_case , default=2 , help='Number of train epochs.' , )
_lowercase =parser.parse_args()
_lowercase ={'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(_snake_case , _snake_case)
if __name__ == "__main__":
main()
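
# ---------------------------------------------------------------------------
# NOTE: illustrative usage, not part of the original script. Assuming the file
# above is saved as `test_checkpointing.py` and `accelerate` is already
# configured for the machine, a typical checkpoint-then-resume run looks like:
#
#   accelerate launch test_checkpointing.py --output_dir ckpts --partial_train_epoch 1
#   accelerate launch test_checkpointing.py --output_dir ckpts \
#       --resume_from_checkpoint ckpts/epoch_0
#
# The second invocation reloads model/optimizer/scheduler state via
# `accelerator.load_state` and asserts it matches the `state_0.json` snapshot
# written by the first run.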
| 557
|
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class KarrasVePipeline(DiffusionPipeline):
    r"""
    Pipeline for unconditional image generation using the stochastic sampler
    from Karras et al. (2022), applied to variance-expanding (VE) models.

    Parameters:
        unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
        scheduler ([`KarrasVeScheduler`]): Scheduler used to denoise the encoded image.
    """

    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
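
# NOTE: a minimal usage sketch, not part of the original file; the checkpoint id
# is illustrative (any UNet2DModel compatible with KarrasVeScheduler would do):
#
#   from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
#
#   unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
#   pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]
#   image.save("karras_ve_sample.png")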
| 557
| 1
|
import warnings

from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor


logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
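
# NOTE: illustrative only, not part of the original file. Instantiating the shim
# behaves exactly like OwlViTImageProcessor but emits the FutureWarning above:
#
#   import warnings
#   from transformers import OwlViTFeatureExtractor
#
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter("always")
#       extractor = OwlViTFeatureExtractor()
#   assert issubclass(caught[-1].category, FutureWarning)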
| 162
|
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
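
# NOTE: a sketch of what the lazy-import pattern above buys, not part of the
# original file. Importing the package is cheap; `_LazyModule` only imports a
# heavy submodule (and with it torch or TF) when an attribute is first resolved:
#
#   import transformers.models.vit_mae as vit_mae  # no torch/TF import happens here
#   config = vit_mae.ViTMAEConfig()                # __getattr__ loads configuration_vit_mae
#   model_cls = vit_mae.ViTMAEModel                # __getattr__ loads modeling_vit_mae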
| 67
| 0
|
"""A script to add a RAG model to a user-managed configuration from separate question-encoder and generator checkpoints."""

import argparse
from pathlib import Path

from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration


def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token"],
        required=True,
        type=str,
        help="RAG model type: rag_sequence, rag_token",
    )
    parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
    parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
    parser.add_argument(
        "--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
    )
    parser.add_argument(
        "--generator_tokenizer_name_or_path",
        type=str,
        help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
    )
    parser.add_argument(
        "--question_encoder_tokenizer_name_or_path",
        type=str,
        help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
    )
    parser.add_argument(
        "--config_name_or_path",
        type=str,
        help=(
            "Identifier of the model config to use, if not provided, resolves to a base config for a given"
            " ``model_type``"
        ),
    )

    args = parser.parse_args()

    dest_dir = Path(args.dest)
    dest_dir.mkdir(exist_ok=True)

    consolidate(
        args.model_type,
        args.generator_name_or_path,
        args.question_encoder_name_or_path,
        dest_dir,
        args.config_name_or_path,
        args.generator_tokenizer_name_or_path,
        args.question_encoder_tokenizer_name_or_path,
    )
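
# NOTE: an illustrative invocation, not part of the original script; it assumes
# the file above is saved as `consolidate_rag_checkpoint.py`. The two component
# checkpoints are the standard RAG building blocks; the destination is arbitrary:
#
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-sequence-checkpoint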
| 716
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ClapProcessor(ProcessorMixin):
    r"""
    Constructs a CLAP processor which wraps a CLAP feature extractor and a RoBERTa tokenizer into a single processor.
    """

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
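
# NOTE: a minimal usage sketch, not part of the original file; the checkpoint id
# "laion/clap-htsat-unfused" is illustrative:
#
#   import numpy as np
#   from transformers import ClapProcessor
#
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   audio = np.zeros(48_000, dtype=np.float32)  # one second of silence at 48 kHz
#   inputs = processor(text=["a dog barking"], audios=audio, sampling_rate=48_000, return_tensors="pt")
#   # `inputs` holds input_ids/attention_mask from the tokenizer plus input_features
#   # from the feature extractor, matching the merge logic in __call__ above.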
| 667
| 0
|
"""
Project Euler Problem 8: https://projecteuler.net/problem=8
Find the thirteen adjacent digits in the 1000-digit number below
that have the greatest product.
"""
import sys

N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def str_eval(s: str) -> int:
    """
    Return the product of all the digits in the string of digits `s`.
    """
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """
    Find the thirteen adjacent digits in `n` with the greatest product, using a
    sliding window that skips ahead whenever a smaller digit would enter it.
    """
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(F"""{solution() = }""")
| 24
|
import logging
import os
from typing import List, TextIO, Union

from conllu import parse_incr

from utils_ner import InputExample, Split, TokenClassificationTask


logger = logging.getLogger(__name__)


class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]


class Chunk(NER):
    def __init__(self):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
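
# NOTE: a minimal usage sketch, not part of the original file: parse a tiny
# CoNLL-style file with the NER task defined above.
if __name__ == "__main__":
    import tempfile

    with tempfile.TemporaryDirectory() as data_dir:
        with open(os.path.join(data_dir, "train.txt"), "w", encoding="utf-8") as f:
            f.write("John B-PER\nlives O\nin O\nParis B-LOC\n\n")
        examples = NER().read_examples_from_file(data_dir, "train")
        assert examples[0].words == ["John", "lives", "in", "Paris"]
        assert examples[0].labels == ["B-PER", "O", "O", "B-LOC"]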
| 121
| 0
|
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest

import black


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'


class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        black_mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=black_mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(f.name, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
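
# NOTE: context for the tests above, not part of the original file. The marker the
# tool looks for sits right above the copied class; `MyModelSchedulerOutput` below
# is a hypothetical name:
#
#   # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->MyModel
#   class MyModelSchedulerOutput(nn.Module):
#       ...
#
# `check_copies.is_copy_consistent` re-renders the referenced class (applying the
# `DDPM->MyModel` substitution), black-formats both sides, and returns the list of
# inconsistent copies, which the tests assert is empty.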
| 397
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 1
|
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict

import requests
from slack_sdk import WebClient


client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])


def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent


def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures


class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self.thread_ts = None  # set by post(); initialized here so post_reply() can check it

        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"

    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)

    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )

    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )

    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)


def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}


def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact


def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts: Dict[str, Artifact] = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts


if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
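
# NOTE: illustrative only, not part of the original script. `handle_test_results`
# parses a pytest summary line into (failed, success, time_spent):
#
#   >>> handle_test_results("= 2 failed, 10 passed in 3.41s =")
#   (2, 10, '3.41s')
#
# The trailing "=" makes the parser take the second-to-last token as the duration,
# matching the short-output format noted in the function body.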
| 542
|
import unittest

import numpy as np

from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_pytesseract_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor


class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
def snake_case ( self ) -> Any:
# with apply_OCR = True
A : Dict = LayoutLMvaImageProcessor()
from datasets import load_dataset
A : Dict = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
A : Any = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
A : List[Any] = image_processing(__UpperCAmelCase , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
A : Tuple = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
A : int = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 
6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __UpperCAmelCase )
self.assertListEqual(encoding.boxes , __UpperCAmelCase )
# with apply_OCR = False
A : Union[str, Any] = LayoutLMvaImageProcessor(apply_ocr=__UpperCAmelCase )
A : List[str] = image_processing(__UpperCAmelCase , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
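
# NOTE: a minimal usage sketch, not part of the original tests. With apply_ocr=False
# the processor only resizes/normalizes; with the default apply_ocr=True it also runs
# Tesseract and returns `words` plus normalized `boxes`, as asserted above:
#
#   from PIL import Image
#   from transformers import LayoutLMv3ImageProcessor
#
#   processor = LayoutLMv3ImageProcessor(apply_ocr=False)
#   image = Image.new("RGB", (640, 480), "white")
#   encoding = processor(image, return_tensors="pt")
#   print(encoding.pixel_values.shape)  # torch.Size([1, 3, 224, 224])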
| 542
| 1
|
"""simple docstring"""
def average_absolute_deviation(nums: list) -> float:
    """
    Return the average absolute deviation of a list of numbers.

    >>> average_absolute_deviation([0])
    0.0
    >>> average_absolute_deviation([4, 1, 3, 2])
    1.0
    >>> average_absolute_deviation([2, 70, 6, 50, 20, 8, 4, 0])
    20.0
    >>> average_absolute_deviation([])
    Traceback (most recent call last):
        ...
    ValueError: List is empty
    """
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 492
|
"""simple docstring"""
def binary_recursive(decimal: int) -> str:
    """
    Take a non-negative integer value and return its binary equivalent.

    >>> binary_recursive(8)
    '1000'
    >>> binary_recursive(1000)
    '1111101000'
    """
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """
    Take an integer string, raise ValueError for wrong inputs, call the function
    above, and return the output prefixed with "0b" or "-0b" for positive and
    negative integers respectively.

    >>> main("11")
    '0b1011'
    >>> main("-11")
    '-0b1011'
    >>> main("ten")
    Traceback (most recent call last):
        ...
    ValueError: Input value is not an integer
    """
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
if __name__ == "__main__":
from doctest import testmod
testmod()
| 492
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}


class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
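
# NOTE: a minimal usage sketch, not part of the original file. Because the config
# mixes in BackboneConfigMixin, `out_features` selects which stages a downstream
# head (e.g. a detection neck) will consume:
#
#   from transformers import ConvNextV2Config
#
#   config = ConvNextV2Config(out_features=["stage2", "stage4"])
#   print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
#   print(config.out_features)  # ['stage2', 'stage4']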
| 108
|
"""simple docstring"""
from __future__ import annotations

import json

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """
    May raise json.decoder.JSONDecodeError if Instagram changes its page layout.
    """
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """
        Return a dict of user information.
        """
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]


def test_instagram_user(username: str = "github") -> None:
    """
    A self-running smoke test of the class above; skipped on CI runners.
    """
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120_000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    instagram_user = InstagramUser("github")
    print(instagram_user)
    print(f"{instagram_user.number_of_posts = }")
    print(f"{instagram_user.number_of_followers = }")
    print(f"{instagram_user.number_of_followings = }")
    print(f"{instagram_user.email = }")
    print(f"{instagram_user.website = }")
    print(f"{instagram_user.profile_picture_url = }")
    print(f"{instagram_user.is_verified = }")
    print(f"{instagram_user.is_private = }")
| 153
| 0
|
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
| 107
|
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # Removed: "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
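# The rule these tests encode: a checkpoint can be loaded from safetensors
# when every PyTorch `.bin` weight file has a safetensors counterpart
# (transformers components pair pytorch_model.bin with model.safetensors,
# diffusers components keep the same stem). A minimal sketch of that check,
# written as an illustration under assumptions rather than the actual
# diffusers implementation (`variant` is accepted only for signature parity;
# the stem-pairing rule alone already covers the variant cases above, since
# suffixes such as ".fp16" ride along with the stem):
import os


def sketch_is_safetensors_compatible(filenames, variant=None):
    available = set(filenames)
    for filename in filenames:
        stem, ext = os.path.splitext(filename)
        if ext != ".bin":
            continue
        folder, _, weight = stem.rpartition("/")
        if weight.startswith("pytorch_model"):
            # pytorch_model.bin -> model.safetensors; the variant suffix is
            # preserved, e.g. pytorch_model.fp16 -> model.fp16
            weight = weight.replace("pytorch_model", "model", 1)
        expected = f"{folder}/{weight}.safetensors" if folder else f"{weight}.safetensors"
        if expected not in available:
            return False
    return True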