code
stringlengths 87
55.2k
| code_codestyle
int64 0
349
| style_context
stringlengths 135
49.1k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
|---|---|---|---|---|
'''simple docstring'''
def SCREAMING_SNAKE_CASE_(price: float, tax_rate: float) -> float:
    """Return *price* with *tax_rate* applied (e.g. tax_rate=0.25 adds 25%).

    The obfuscated original gave both parameters the same name (a SyntaxError)
    while the body read `price` and `tax_rate`; parameter names restored.
    """
    return price * (1 + tax_rate)


# The demo below calls the function as `price_plus_tax`, which did not exist;
# keep the original public name and add the alias the demo relies on.
price_plus_tax = SCREAMING_SNAKE_CASE_

if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
| 41
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case = logging.get_logger(__name__)
# Map of checkpoint name -> config URL. The obfuscated original rebound
# `_snake_case` here, clobbering the logger created on the previous line;
# give the map its own name.
SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a Swin Transformer model / backbone.

    NOTE(review): the obfuscated original listed an undefined base class `a`
    twice, named every ``__init__`` parameter ``__a`` (a SyntaxError), and
    bound every attribute to ``_lowerCAmelCase`` instead of ``self``.
    Parameter and attribute names are restored from the assignments in the
    body; base classes restored from the module imports
    (``BackboneConfigMixin``, ``PretrainedConfig``).
    """

    # Both class attributes were bound to the same obfuscated name, losing the
    # first one; restored per the PretrainedConfig convention — TODO confirm.
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],          # mutable defaults kept to preserve the original signature; treated as read-only
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class SwinOnnxConfig(OnnxConfig):
    """ONNX export configuration for Swin.

    NOTE(review): the obfuscated original reused the same class name as the
    config class above and defined two properties under one name (the second
    silently replaced the first). Names restored per the OnnxConfig API
    (`inputs`, `atol_for_validation`) — TODO confirm against upstream.
    """

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single image input with dynamic batch/channel/spatial axes.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating exported-model outputs.
        return 1e-4
| 36
| 0
|
'''simple docstring'''
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
# CI hub test constants. The obfuscated original bound every constant to the
# same name `lowercase`, while later lines referenced `CI_HUB_ENDPOINT` etc.
# (NameError). Names restored from those references; the `{path}` placeholder
# on the second URL template had been clobbered by "(unknown)" and is restored
# by symmetry with the datasets template — TODO confirm against upstream.
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{path}"

CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    """Point huggingface_hub's file-download URL template at the CI endpoint.

    NOTE(review): all fixtures in the obfuscated original shared one name and
    referenced an undefined `monkeypatch`; names restored from the fixture
    cross-references below. Expects the module-level CI_HUB_* constants.
    """
    monkeypatch.setattr(
        'huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE', CI_HFH_HUGGINGFACE_CO_URL
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    """Redirect the `datasets` library at the CI hub endpoint."""
    monkeypatch.setattr('datasets.config.HF_ENDPOINT', CI_HUB_ENDPOINT)
    monkeypatch.setattr('datasets.config.HUB_DATASETS_URL', CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    """Store the hub token under a CI-specific path instead of the user's."""
    monkeypatch.setattr('huggingface_hub.hf_api.HfFolder.path_token', CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    """Save the dummy CI token for the test, delete it afterwards."""
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()
@pytest.fixture(scope='session' )
def hf_api():
    """Session-wide HfApi client bound to the CI hub endpoint."""
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope='session' )
def hf_token(hf_api):
    """Install the dummy CI token for the whole session; restore the user's
    previous token afterwards (the obfuscated original never captured it)."""
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    """Return a callable that deletes a CI dataset repo by id."""

    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type='dataset' )

    return _cleanup_repo
@pytest.fixture
def temporary_repo(cleanup_repo):
    """Context manager fixture: yield a repo id and delete the repo on exit."""

    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo
@pytest.fixture(scope='session' )
def hf_private_dataset_repo_txt_data_(hf_api, hf_token, text_file):
    """Create a private CI dataset repo holding one text file; best-effort
    cleanup at session end. Fixture names restored from the wrapper fixture
    below (`text_file` param name is an assumption — TODO confirm)."""
    repo_name = f"repo_txt_data-{int(time.time() * 10e3 )}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type='dataset', private=True )
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file ),
        path_in_repo='data/text_data.txt',
        repo_id=repo_id,
        repo_type='dataset',
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type='dataset' )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    # Combine the repo with the CI-endpoint patches and expose its id.
    return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='session' )
def hf_private_dataset_repo_zipped_txt_data_(hf_api, hf_token, zip_csv_with_dir_path):
    """Create a private CI dataset repo holding one zip of text data;
    best-effort cleanup at session end. (`zip_csv_with_dir_path` param name
    is an assumption — TODO confirm against the fixtures it pairs with.)"""
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3 )}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type='dataset', private=True )
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path ),
        path_in_repo='data.zip',
        repo_id=repo_id,
        repo_type='dataset',
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type='dataset' )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='session' )
def hf_private_dataset_repo_zipped_img_data_(hf_api, hf_token, zip_image_path):
    """Create a private CI dataset repo holding one zip of image data;
    best-effort cleanup at session end. (`zip_image_path` param name is an
    assumption — TODO confirm.)"""
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3 )}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type='dataset', private=True )
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path ),
        path_in_repo='data.zip',
        repo_id=repo_id,
        repo_type='dataset',
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type='dataset' )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_img_data_
| 42
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
# Import the real Versatile Diffusion pipelines only when both PyTorch and a
# recent-enough transformers (>= 4.25.0) are installed; otherwise fall back to
# dummy placeholder objects that raise a helpful error when instantiated.
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Dummy objects mirror the public API so `from ... import X` still works.
    from ...utils.dummy_torch_and_transformers_objects import (
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
    )
else:
    # Real implementations.
    from .modeling_text_unet import UNetFlatConditionModel
    from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
    from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
    from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 36
| 0
|
import datasets
__lowercase = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
__lowercase = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
__lowercase = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def simple_accuracy(preds, labels):
    """Fraction of positions where `preds` equals `labels`.

    Both arguments are numpy arrays (`.mean()` on the boolean mask). The
    obfuscated original duplicated the parameter name (SyntaxError); the
    function name is restored from its call site in the metric class below.
    """
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase_(datasets.Metric):
    """XNLI metric: plain accuracy of predictions against references.

    NOTE(review): the obfuscated original defined both methods under one name
    so the first was lost; the `datasets.Metric` API requires `_info` and
    `_compute`, restored accordingly.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    # sts-b is a regression config elsewhere; labels stay ints here.
                    '''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32'''),
                    '''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32'''),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format='''numpy''',
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
| 43
|
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
# Comparison-operator lookup for version requirements; referenced as `ops`
# throughout the functions below (the obfuscated original bound it to
# `_snake_case`, leaving those references undefined).
ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    """Raise unless `got_ver` satisfies `op want_ver` (PEP 440 comparison).

    The obfuscated original named all six parameters identically
    (SyntaxError); names restored from the body and the call sites in
    `require_version` below. Parameter order is an assumption — TODO confirm.
    """
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}." )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}" )
def require_version(requirement, hint=None):
    """Check that an installed package satisfies a pip-style `requirement`
    (e.g. ``"tokenizers==0.9.4"``, ``"numpy>=1.17,<2"``), raising with an
    optional `hint` appended when it does not.

    Local names restored from the references the obfuscation left intact.
    """
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}" )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}" )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys() )}, but got {op}" )

    # special case: compare against the running interpreter's version
    # (NOTE(review): a bare "python" requirement would leave `wanted` unset —
    # mirrors the original structure; confirm against upstream before changing)
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}" )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def require_version_core(requirement):
    """`require_version` wrapper that appends a core-install hint on failure."""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
| 36
| 0
|
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
_a : str = logging.get_logger(__name__)
# Pretrained-resource maps for the three DPR tokenizers. The obfuscated
# original bound every constant to `_a`; names restored from their uses in the
# tokenizer classes below.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/dpr-ctx_encoder-single-nq-base': (
            'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
        ),
        'facebook/dpr-ctx_encoder-multiset-base': (
            'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
        ),
    },
    'tokenizer_file': {
        'facebook/dpr-ctx_encoder-single-nq-base': (
            'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
        ),
        'facebook/dpr-ctx_encoder-multiset-base': (
            'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
        ),
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/dpr-question_encoder-single-nq-base': (
            'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
        ),
        'facebook/dpr-question_encoder-multiset-base': (
            'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
        ),
    },
    'tokenizer_file': {
        'facebook/dpr-question_encoder-single-nq-base': (
            'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
        ),
        'facebook/dpr-question_encoder-multiset-base': (
            'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
        ),
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/dpr-reader-single-nq-base': (
            'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
        ),
        'facebook/dpr-reader-multiset-base': (
            'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
        ),
    },
    'tokenizer_file': {
        'facebook/dpr-reader-single-nq-base': (
            'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
        ),
        'facebook/dpr-reader-multiset-base': (
            'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
        ),
    },
}

CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/dpr-ctx_encoder-single-nq-base': 512,
    'facebook/dpr-ctx_encoder-multiset-base': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/dpr-question_encoder-single-nq-base': 512,
    'facebook/dpr-question_encoder-multiset-base': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/dpr-reader-single-nq-base': 512,
    'facebook/dpr-reader-multiset-base': 512,
}

CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
    'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
    'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
    'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    """Fast DPR context-encoder tokenizer (a BertTokenizerFast with DPR
    pretrained resources).

    NOTE(review): the obfuscated original bound all five class attributes to
    one name (each clobbering the previous) and inherited an undefined base;
    attribute names restored per the tokenizer-fast convention, base restored
    from the module's `BertTokenizerFast` import.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    """Fast DPR question-encoder tokenizer (a BertTokenizerFast with DPR
    pretrained resources). Attribute names restored — see the context-encoder
    class for the obfuscation details."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
# Output containers for the reader. The obfuscated original bound both to
# `_a`; names restored from their uses in `decode_best_spans` below.
DPRSpanPrediction = collections.namedtuple(
    'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
_a : List[str] = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. 
This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. 
If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
# NOTE(review): the original decorated this class with
# `@add_start_docstrings(SCREAMING_SNAKE_CASE_)`, but the obfuscation left
# that docstring-constant name undefined (the reader usage docstring above is
# bound to `_a`); the decorator is dropped and the contract summarized in the
# class docstring instead.
class CustomDPRReaderTokenizerMixin:
    """Mixin adding DPR-reader encoding/decoding on top of a BERT fast tokenizer.

    `__call__` builds `[CLS] question [SEP] title [SEP] text` id matrices of
    shape (n_passages, sequence_length); `decode_best_spans` turns reader
    logits back into ranked `DPRSpanPrediction` answer spans.

    Obfuscation restored here: all three methods shared duplicate parameter
    names (SyntaxError) and local bindings were destroyed; names recovered
    from the surviving references in each body.
    """

    def __call__(
        self,
        questions,
        titles=None,
        texts=None,
        padding=False,
        truncation=False,
        max_length=None,
        return_tensors=None,
        return_attention_mask=None,
        **kwargs,
    ) -> "BatchEncoding":
        # With no titles/texts this behaves like the plain tokenizer.
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            # Only one of titles/texts given: encode it as the text pair.
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        # One question may be duplicated across all passages.
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                # Attend to everything that is not padding.
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input,
        reader_output,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> "List[DPRSpanPrediction]":
        """Rank passages by relevance and extract the best answer spans."""
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits,
        end_logits,
        max_answer_length,
        top_spans,
    ):
        """Greedy non-overlapping span selection by summed start+end score."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            # Skip spans overlapping an already-chosen (higher-scoring) span.
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
# NOTE(review): the original `@add_end_docstrings(SCREAMING_SNAKE_CASE_)`
# decorator referenced an undefined obfuscated constant and the class listed
# the same (undefined) base twice; decorator dropped, bases restored to the
# reader mixin above plus the module's `BertTokenizerFast` import.
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    """Fast DPR reader tokenizer: BERT fast tokenization plus the reader
    span-encoding/decoding mixin."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
| 44
|
import argparse
from collections import defaultdict
import yaml
_snake_case = "docs/source/en/_toctree.yml"
def clean_doc_toc(doc_list):
    """Deduplicate and alphabetically sort a toctree section, keeping any
    "Overview" entry first.

    Local names restored from the references the obfuscation left intact
    (`counts`, `overview_doc`, `new_doc_list`, `duplicates`, `titles`,
    `new_doc`); the function name is taken from its call sites below.
    """
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)
    doc_list = new_doc_list

    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others." )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    # (fixed: the original tested `"local" not in counts`, i.e. the literal
    # string against the counter's keys, which never expresses "doc has no
    # local entry")
    new_doc.extend([doc for doc in doc_list if "local" not in doc or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError("{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc
def check_scheduler_doc(overwrite=False):
    """Verify (and with `overwrite=True`, fix) the sort order of the
    "Schedulers" section of the docs toctree at `PATH_TO_TOC`.

    Raises ValueError when the section is unsorted and `overwrite` is False.
    Names restored from the surviving references in the obfuscated body.
    """
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
def check_pipeline_doc(overwrite=False):
    """Verify (and with `overwrite=True`, fix) the sort order of the
    "Pipelines" section of the docs toctree at `PATH_TO_TOC`, including any
    nested per-pipeline "section" lists.

    Raises ValueError when unsorted and `overwrite` is False.
    """
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
if __name__ == "__main__":
    # CLI entry point: with --fix_and_overwrite the toctree is rewritten in
    # place, otherwise an unsorted toctree raises. The obfuscated original
    # bound the parser and args to the same throwaway name, so
    # `parser.add_argument` referenced an undefined variable.
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
| 36
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowercase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
lowercase_ = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")\n >>> pipe_prior.to(\"cuda\")\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")\n >>> pipe.to(\"cuda\")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save(\"cat.png\")\n ```\n"
def lowercase(height: int, width: int, scale_factor: int = 8) -> tuple[int, int]:
    """Round (height, width) up so each is a multiple of ``scale_factor``
    after dividing by ``scale_factor**2`` (latent-to-pixel sizing for the
    Kandinsky decoder).

    The original signature declared the same placeholder parameter name
    three times (a SyntaxError) and referenced the undefined locals
    ``new_height``/``new_width``; parameters are restored to distinct names.

    :param height: requested image height in pixels
    :param width: requested image width in pixels
    :param scale_factor: VQ downscale factor (default 8)
    :return: (new_height, new_width) suitable for the movq decoder
    """
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        # Round up when height is not an exact multiple.
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    """Kandinsky 2.2 decoder pipeline: denoises latents from CLIP image
    embeddings with a UNet + DDPM scheduler, then decodes them with the MoVQ
    VQ-GAN into images.

    NOTE(review): identifiers in this file were mechanically renamed. Several
    signatures repeat the placeholder parameter `_a` (a SyntaxError in
    Python), and several `__a = ...` lines drop the intended
    `self.<attr>` / local target, so later reads of the original names fail.
    Flagged inline — reconcile against the upstream diffusers source.
    """

    def __init__( self , _a , _a , _a , ):
        # NOTE(review): duplicate `_a` parameters — originally (unet, scheduler, movq).
        super().__init__()
        self.register_modules(
            unet=_a , scheduler=_a , movq=_a , )
        # NOTE(review): bound to a throwaway local, but __call__ reads
        # `self.movq_scale_factor` — originally `self.movq_scale_factor = ...`.
        __a = 2 ** (len(self.movq.config.block_out_channels ) - 1)

    def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a ):
        """Prepare initial latents: sample noise when none are given, otherwise
        validate shape and move to device; scale by the scheduler's init sigma.

        NOTE(review): duplicate `_a` parameters — originally
        (shape, dtype, device, generator, latents, scheduler); the names
        `latents`, `shape`, `scheduler` read below come from those originals.
        """
        if latents is None:
            __a = randn_tensor(_a , generator=_a , device=_a , dtype=_a )
        else:
            if latents.shape != shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
            __a = latents.to(_a )
        __a = latents * scheduler.init_noise_sigma
        return latents

    def __UpperCAmelCase ( self , _a=0 ):
        """Offload unet and movq to CPU with accelerate's `cpu_offload` to save
        GPU memory (sequential offload).

        NOTE(review): `gpu_id` and `models` below refer to pre-renaming names.
        """
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''' )
        __a = torch.device(f'''cuda:{gpu_id}''' )
        __a = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(_a , _a )

    def __UpperCAmelCase ( self , _a=0 ):
        """Model CPU offload with hooks (accelerate >= 0.17): each model moves to
        GPU when used and back to CPU afterwards; the last hook is kept so the
        final model can be offloaded manually.

        NOTE(review): `gpu_id` and `hook` below refer to pre-renaming names.
        """
        if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
        __a = torch.device(f'''cuda:{gpu_id}''' )
        if self.device.type != "cpu":
            self.to('''cpu''' , silence_dtype_warnings=_a )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        __a = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            __a , __a = cpu_offload_with_hook(_a , _a , prev_module_hook=_a )
        # We'll offload the last model manually.
        __a = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def __UpperCAmelCase ( self ):
        """Device the pipeline's modules actually execute on (follows accelerate
        hooks that keep weights on CPU until needed)."""
        if not hasattr(self.unet , '''_hf_hook''' ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(_a , '''_hf_hook''' )
                and hasattr(module._hf_hook , '''execution_device''' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device

    @torch.no_grad()
    @replace_example_docstring(_a )
    def __call__( self , _a , _a , _a = 512 , _a = 512 , _a = 100 , _a = 4.0 , _a = 1 , _a = None , _a = None , _a = "pil" , _a = True , ):
        """Run the full denoising loop and decode to images.

        NOTE(review): duplicate `_a` parameters — originally
        (image_embeds, negative_image_embeds, height, width,
        num_inference_steps, guidance_scale, num_images_per_prompt, generator,
        latents, output_type, return_dict); the decorator argument was
        EXAMPLE_DOC_STRING. Bodies below read those original names.
        """
        __a = self._execution_device
        # Classifier-free guidance doubles the batch (uncond + cond).
        __a = guidance_scale > 1.0
        if isinstance(_a , _a ):
            __a = torch.cat(_a , dim=0 )
        __a = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(_a , _a ):
            __a = torch.cat(_a , dim=0 )
        if do_classifier_free_guidance:
            __a = image_embeds.repeat_interleave(_a , dim=0 )
            __a = negative_image_embeds.repeat_interleave(_a , dim=0 )
            __a = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_a )
        self.scheduler.set_timesteps(_a , device=_a )
        __a = self.scheduler.timesteps
        __a = self.unet.config.in_channels
        __a , __a = downscale_height_and_width(_a , _a , self.movq_scale_factor )
        # create initial latent
        __a = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , _a , _a , _a , self.scheduler , )
        for i, t in enumerate(self.progress_bar(_a ) ):
            # expand the latents if we are doing classifier free guidance
            __a = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            __a = {'''image_embeds''': image_embeds}
            __a = self.unet(
                sample=_a , timestep=_a , encoder_hidden_states=_a , added_cond_kwargs=_a , return_dict=_a , )[0]
            if do_classifier_free_guidance:
                # Split model output into noise and predicted variance, apply guidance
                # to the noise, then re-attach the conditional variance.
                __a , __a = noise_pred.split(latents.shape[1] , dim=1 )
                __a , __a = noise_pred.chunk(2 )
                __a , __a = variance_pred.chunk(2 )
                __a = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                __a = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , '''variance_type''' )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                __a , __a = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            __a = self.scheduler.step(
                _a , _a , _a , generator=_a , )[0]
        # post-processing
        __a = self.movq.decode(_a , force_not_quantize=_a )['''sample''']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
        if output_type in ["np", "pil"]:
            # [-1, 1] -> [0, 1], then channel-last numpy for PIL conversion.
            __a = image * 0.5 + 0.5
            __a = image.clamp(0 , 1 )
            __a = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            __a = self.numpy_to_pil(_a )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=_a )
| 45
|
def A(density: float, bulk_modulus: float) -> float:
    """Speed of sound in a fluid: sqrt(bulk_modulus / density).

    The original signature declared the same placeholder name for both
    parameters (a SyntaxError); they are restored to the names the body
    already used.

    :param density: fluid density (must be > 0)
    :param bulk_modulus: fluid bulk modulus (must be > 0)
    :raises ValueError: if either quantity is non-positive
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 36
| 0
|
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    # "I'm feeling lucky" style script: google the CLI args (or prompt) and
    # open the first result in the default browser.
    # NOTE(review): mechanical renaming rebound every result to
    # `SCREAMING_SNAKE_CASE__`, while later lines read the original names
    # (`query`, `res`, `link`) — this block raises NameError as written.
    SCREAMING_SNAKE_CASE__ = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    SCREAMING_SNAKE_CASE__ = f'https://www.google.com/search?q={query}&num=100'
    # Fetch the results page with a randomized User-Agent.
    SCREAMING_SNAKE_CASE__ = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        # Organic result container class.
        SCREAMING_SNAKE_CASE__ = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        # Fallback layout: redirect-style links carry the target in the
        # query string's `url` parameter.
        SCREAMING_SNAKE_CASE__ = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
    webbrowser.open(link)
| 46
|
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
_snake_case = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class UpperCAmelCase_(Dataset):
    """Dummy dataset whose i-th item is simply the integer i.

    Fixes from the mechanically-renamed original: `self.length` was never set
    (the value was bound to a throwaway local and the RHS name `length` was
    undefined), `__getitem__` returned the undefined name `i`, and the base
    class placeholder `a` is restored to `torch.utils.data.Dataset` (imported
    at the top of this file). TODO confirm the intended base against the
    upstream transformers test.
    """

    def __init__(self, __a=101):
        # Number of items this dataset reports.
        self.length = __a

    def __len__(self):
        return self.length

    def __getitem__(self, __a):
        # Item i is the index itself, so ordering can be verified downstream.
        return __a
class UpperCAmelCase_:
    """Trivial data collator: wrap a batch of sample indices into the
    ``input_ids``/``labels`` tensors the dummy model expects."""

    def __call__(self, __a):
        input_ids = torch.tensor(__a)
        labels = torch.tensor(__a)
        return {"input_ids": input_ids, "labels": labels}
class UpperCAmelCase_(nn.Module):
    """Dummy model: echoes its input ids (with a constant zero loss when
    labels are supplied) so Trainer plumbing can be tested end to end.

    Fixes from the mechanically-renamed original: the forward method declared
    the same placeholder name for both parameters (a SyntaxError) and its body
    read the undefined names `labels`/`input_ids`; the linear layer was bound
    to a throwaway local instead of a module attribute.
    """

    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def snake_case__(self, input_ids, labels=None):
        """Return (zero_loss, input_ids) when labels are given, else input_ids.

        Parameter names match the keys produced by the collator above
        ("input_ids"/"labels") so Trainer can call via **inputs.
        """
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class UpperCAmelCase_ ( a):
    """Launch this very test file under torchrun on 2 Neuron cores and assert
    the subprocess exits cleanly.

    NOTE(review): the base placeholder `a` was presumably TestCasePlus
    (imported above) — confirm against the upstream transformers test. The
    f-string below embeds literal newlines that `.split()` then removes.
    """

    @require_torch_neuroncore
    def snake_case__ ( self):
        # Build the torchrun command line: 2 processes, unique port, this file.
        _lowerCAmelCase : int = f"--nproc_per_node=2\n        --master_port={get_torch_dist_unique_port()}\n        {self.test_file_dir}/test_trainer_distributed.py\n    ".split()
        _lowerCAmelCase : Tuple = self.get_auto_remove_tmp_dir()
        _lowerCAmelCase : Optional[int] = f"--output_dir {output_dir}".split()
        _lowerCAmelCase : List[Any] = ["torchrun"] + distributed_args + args
        execute_subprocess_async(__a, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call

class UpperCAmelCase_ ( a):
    """Same as above but across all local GPUs (requires >1 GPU).

    NOTE(review): as in the class above, the locals are rebound to
    `_lowerCAmelCase` while later lines read the original names
    (`output_dir`, `distributed_args`, `args`) — renaming damage.
    """

    @require_torch_multi_gpu
    def snake_case__ ( self):
        # One process per visible CUDA device.
        _lowerCAmelCase : Dict = f"--nproc_per_node={torch.cuda.device_count()}\n        --master_port={get_torch_dist_unique_port()}\n        {self.test_file_dir}/test_trainer_distributed.py\n    ".split()
        _lowerCAmelCase : Any = self.get_auto_remove_tmp_dir()
        _lowerCAmelCase : Optional[int] = f"--output_dir {output_dir}".split()
        _lowerCAmelCase : Any = ["torchrun"] + distributed_args + args
        execute_subprocess_async(__a, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
    # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
    #
    # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    #
    # NOTE(review): mechanical renaming rebound every result to `_snake_case`
    # while later lines read the original names (`parser`, `training_args`,
    # `dataset`, `trainer`, `metrics`, `p`, `compute_metrics`), so this block
    # raises NameError as written; reconcile with the upstream test.
    _snake_case = HfArgumentParser((TrainingArguments,))
    _snake_case = parser.parse_args_into_dataclasses()[0]
    logger.warning(
        f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
        f'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'''
    )
    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        _snake_case = DummyDataset(dataset_length)

        def A ( _lowerCamelCase ):
            '''Per-evaluation metric: predictions/labels must be exactly 0..len-1 in order.'''
            _lowerCAmelCase : Dict = list(range(len(_lowerCamelCase ) ) )
            _lowerCAmelCase : Union[str, Any] = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    F"{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}" )
            return {"success": success}

        _snake_case = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        # Evaluate, then predict; abort with a non-zero exit code on failure so
        # the parent process (the unittest above) sees the error.
        _snake_case = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)
        _snake_case = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)
        # Repeat with eval accumulation enabled (originally
        # `training_args.eval_accumulation_steps = 2`).
        _snake_case = 2
        _snake_case = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)
        _snake_case = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)
        # Reset accumulation for the next dataset length.
        _snake_case = None
| 36
| 0
|
'''simple docstring'''
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def _lowerCAmelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Tuple=False ) -> Dict:
"""simple docstring"""
try:
_SCREAMING_SNAKE_CASE =os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_SCREAMING_SNAKE_CASE =default
else:
# KEY is set, convert it to True or False.
try:
_SCREAMING_SNAKE_CASE =strtobool(_UpperCamelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no." )
return _value
# Global flag: whether slow tests run (driven by the RUN_SLOW env var).
# NOTE(review): the call target `parse_flag_from_env` and the consumer name
# `_run_slow_tests` (read below) do not match the mechanically renamed
# definitions in this module — renaming damage.
lowerCamelCase : Dict = parse_flag_from_env("RUN_SLOW", default=False)
def _lowerCAmelCase ( _UpperCamelCase : str ) -> List[Any]:
    """Decorator: unconditionally skip the test."""
    return unittest.skip('Test was skipped' )(_UpperCamelCase )
def _lowerCAmelCase ( _UpperCamelCase : List[Any] ) -> Tuple:
    """Decorator: run only when slow tests are enabled (RUN_SLOW=1)."""
    return unittest.skipUnless(_run_slow_tests , 'test is slow' )(_UpperCamelCase )
def _lowerCAmelCase ( _UpperCamelCase : List[Any] ) -> Optional[int]:
    """Decorator: run only when no CUDA device is available (CPU-only test)."""
    return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(_UpperCamelCase )
def _lowerCAmelCase ( _UpperCamelCase : Optional[int] ) -> List[str]:
    """Decorator: require at least one CUDA GPU."""
    return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(_UpperCamelCase )
def _lowerCAmelCase ( _UpperCamelCase : List[str] ) -> List[str]:
    """Decorator: require an Intel XPU device."""
    return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(_UpperCamelCase )
def _lowerCAmelCase ( _UpperCamelCase : Dict ) -> Tuple:
    """Decorator: require Apple `mps` backend support in torch."""
    return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(_UpperCamelCase )
def _lowerCAmelCase ( _UpperCamelCase : Optional[int] ) -> Any:
    """Decorator: require both transformers and datasets to be installed."""
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(_UpperCamelCase )
def _lowerCAmelCase ( _UpperCamelCase : Union[str, Any] ) -> Dict:
    """Decorator: require the bitsandbytes library."""
    return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(_UpperCamelCase )
def _lowerCAmelCase ( _UpperCamelCase : Dict ) -> Optional[Any]:
    """Decorator: require a TPU."""
    return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(_UpperCamelCase )
def _lowerCAmelCase ( _UpperCamelCase : Optional[Any] ) -> Dict:
    """Decorator: require exactly one CUDA GPU."""
    return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(_UpperCamelCase )
def _lowerCAmelCase ( _UpperCamelCase : List[str] ) -> List[Any]:
    """Decorator: require exactly one XPU."""
    return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(_UpperCamelCase )
def _lowerCAmelCase ( _UpperCamelCase : str ) -> List[Any]:
    """Decorator: require more than one CUDA GPU."""
    return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(_UpperCamelCase )
def _lowerCAmelCase ( _UpperCamelCase : Optional[Any] ) -> Optional[int]:
    """Decorator: require more than one XPU."""
    return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(_UpperCamelCase )
def _lowerCAmelCase ( _UpperCamelCase : Union[str, Any] ) -> Optional[int]:
    """Decorator: require the safetensors library."""
    return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(_UpperCamelCase )
def _lowerCAmelCase ( _UpperCamelCase : Dict ) -> List[str]:
    """Decorator: require DeepSpeed."""
    return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(_UpperCamelCase )
def _lowerCAmelCase ( _UpperCamelCase : str ) -> List[Any]:
    """Decorator: require torch >= 1.12.0."""
    return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(_UpperCamelCase )
def _lowerCAmelCase ( _UpperCamelCase : int=None , _UpperCamelCase : List[Any]=None ) -> List[Any]:
"""simple docstring"""
if test_case is None:
return partial(_UpperCamelCase , version=_UpperCamelCase )
return unittest.skipUnless(is_torch_version('>=' , _UpperCamelCase ) , f"test requires torch version >= {version}" )(_UpperCamelCase )
def _lowerCAmelCase ( _UpperCamelCase : Optional[Any] ) -> Dict:
    """Decorator: require Tensorboard."""
    return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(_UpperCamelCase )
def _lowerCAmelCase ( _UpperCamelCase : Optional[Any] ) -> str:
    """Decorator: require wandb."""
    return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(_UpperCamelCase )
def _lowerCAmelCase ( _UpperCamelCase : Union[str, Any] ) -> int:
    """Decorator: require comet_ml."""
    return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(_UpperCamelCase )
# True when at least one tracker backend (wandb/tensorboard) is installed and
# comet_ml is NOT (comet hijacks tracker tests).
# NOTE(review): bound to `lowerCamelCase`, but the decorator below reads the
# original name `_atleast_one_tracker_available` — renaming damage.
lowerCamelCase : List[str] = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def _lowerCAmelCase ( _UpperCamelCase : Optional[int] ) -> Dict:
    """Decorator: require at least one tracker and the absence of comet_ml."""
    return unittest.skipUnless(
        _atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(_UpperCamelCase )
class A__(unittest.TestCase):
    """TestCase that provides a class-wide temporary directory, emptied before
    each test when ``clear_on_setup`` is set.

    Fixes from the mechanically-renamed original: the class attribute was
    named ``A__`` although methods read ``self.clear_on_setup``; the temp dir
    was bound to a throwaway local although methods read ``cls.tmpdir``; and
    the directory branch removed the undefined name ``_a`` instead of
    ``path``.

    NOTE(review): all three methods below still share the name ``A`` (so only
    the last definition survives on the class); they were presumably
    setUpClass / tearDownClass / setUp originally — confirm upstream before
    renaming, since that changes the class interface.
    """

    # Whether setUp should empty the shared temp dir before each test.
    clear_on_setup = True

    @classmethod
    def A(cls):
        """Create the shared temporary directory."""
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def A(cls):
        """Remove the shared temporary directory if it still exists."""
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def A(self):
        """Empty ``self.tmpdir`` (files and subdirectories) when enabled."""
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class A__ ( unittest.TestCase ):
    """TestCase that resets accelerate's global state singletons after each test,
    so state does not leak between tests.

    NOTE(review): the method is named `A` (mechanical renaming) — unittest
    only calls `tearDown`, so as written this hook never runs automatically.
    """

    def A ( self : Optional[int] ) -> Optional[int]:
        """Teardown: reset the AcceleratorState/PartialState singletons."""
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class A__ ( unittest.TestCase ):
    """TestCase that starts a batch of mock.patch objects and registers each
    for cleanup (stop) at test teardown.

    NOTE(review): renaming damage — the RHS reads the undefined name `mocks`
    (the parameter is `_a`), the result is bound to a throwaway local while
    the loop reads `self.mocks`, and the method itself is named `A`.
    """

    def A ( self : str , _a : Union[mock.Mock, List[mock.Mock]] ) -> Optional[Any]:
        """Normalize `_a` to a list of mocks; start each and stop at cleanup."""
        _SCREAMING_SNAKE_CASE =mocks if isinstance(_a , (tuple, list) ) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop )
def _lowerCAmelCase ( _UpperCamelCase : int ) -> Dict:
    """Return True when the given tensor is identical on every distributed
    device: gather the tensor from all processes and compare each copy to the
    local one.

    NOTE(review): renaming damage — every result is bound to
    `_SCREAMING_SNAKE_CASE` while later lines read the original names
    (`tensor`, `state`, `tensors`); `_UpperCamelCase` was presumably the
    input tensor. Reconcile with the upstream accelerate test utils.
    """
    _SCREAMING_SNAKE_CASE =AcceleratorState()
    # Add a leading batch dim so gather stacks one copy per process.
    _SCREAMING_SNAKE_CASE =tensor[None].clone().to(state.device )
    _SCREAMING_SNAKE_CASE =gather(_UpperCamelCase ).cpu()
    _SCREAMING_SNAKE_CASE =tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] , _UpperCamelCase ):
            return False
    return True
class A__:
    """Value object holding the outcome of a subprocess run: exit code plus
    captured stdout/stderr line lists.

    The original ``__init__`` declared the same placeholder name for all three
    parameters (a SyntaxError) and bound the values to throwaway locals,
    so ``returncode``/``stdout``/``stderr`` — read by callers below — were
    never set.
    """

    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _lowerCAmelCase ( _UpperCamelCase : Any , _UpperCamelCase : List[Any] ) -> List[str]:
"""simple docstring"""
while True:
_SCREAMING_SNAKE_CASE =await stream.readline()
if line:
callback(_UpperCamelCase )
else:
break
async def _lowerCAmelCase(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False):
    """Run ``cmd`` as an async subprocess, streaming stdout/stderr line by line.

    The original signature declared the same placeholder name for all six
    parameters (a SyntaxError); they are restored to the keyword names the
    caller below already uses (env/stdin/timeout/quiet/echo), and the
    throwaway locals are restored to ``p``/``out``/``err``.

    NOTE(review): `_read_stream` and `_RunOutput` are sibling definitions in
    this module whose names were also mangled by the renaming — confirm they
    resolve before running.

    :param cmd: argv list; ``cmd[0]`` is the executable
    :param env: environment mapping for the child process
    :param stdin: stdin for the child process
    :param timeout: seconds to wait for both stream readers
    :param quiet: suppress echoing of captured lines
    :param echo: print the command line before running
    :return: ``_RunOutput(returncode, stdout_lines, stderr_lines)``
    """
    if echo:
        print('\nRunning: ' , ' '.join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env,
    )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        # Decode, collect, and (unless quiet) echo each captured line.
        line = line.decode('utf-8').rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label='stdout:'))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label='stderr:'))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def _lowerCAmelCase(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True):
    """Synchronous wrapper: run ``cmd`` via the async subprocess streamer and
    raise ``RuntimeError`` (with combined stderr) on a non-zero exit code.

    The original signature declared the same placeholder name for all six
    parameters (a SyntaxError); they are restored to the keyword names
    forwarded below, and the throwaway locals to ``result``/``cmd_str``.

    NOTE(review): `_stream_subprocess` is the sibling coroutine above whose
    name was also mangled by the renaming — confirm it resolves before running.

    :return: the `_RunOutput` of the successful run
    :raises RuntimeError: when the process exits non-zero
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))
    cmd_str = ' '.join(cmd)
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}")
    return result
class A__ ( A__ ):
    """Exception raised when a subprocess invocation fails.

    NOTE(review): renaming damage — this was presumably
    ``SubprocessCallException(Exception)`` (the function below raises the
    undefined name ``SubprocessCallException``); as written it inherits the
    previous ``A__`` value-object class, which is not an Exception subclass.
    """
    pass
def _lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : Any=False ) -> Tuple:
"""simple docstring"""
try:
_SCREAMING_SNAKE_CASE =subprocess.check_output(_UpperCamelCase , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(_UpperCamelCase , 'decode' ):
_SCREAMING_SNAKE_CASE =output.decode('utf-8' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(_UpperCamelCase )}` failed with the following error:\n\n{e.output.decode()}" ) from e
| 47
|
from __future__ import annotations
import bisect
def A(sorted_collection, item, lo=0, hi=-1):
    """Leftmost insertion index for ``item`` in ``sorted_collection[lo:hi]``
    (like ``bisect.bisect_left``; ``hi < 0`` means the full length).

    The original signature declared the same placeholder name for all four
    parameters (a SyntaxError); they are restored to the names the body
    already used.
    """
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def A(sorted_collection, item, lo=0, hi=-1):
    """Rightmost insertion index for ``item`` in ``sorted_collection[lo:hi]``
    (like ``bisect.bisect_right``; ``hi < 0`` means the full length).

    The original signature declared the same placeholder name for all four
    parameters (a SyntaxError); they are restored to the names the body
    already used.
    """
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            # Equal elements go to the left of the answer.
            lo = mid + 1
        else:
            hi = mid
    return lo
def A(sorted_collection, item, lo=0, hi=-1):
    """Insert ``item`` into the sorted list at the leftmost correct position
    (in place); ``hi < 0`` means the full length.

    The original declared duplicate placeholder parameters (a SyntaxError) and
    called the undefined name ``bisect_left``; it now delegates to the stdlib
    ``bisect`` module (already imported at the top of this file).
    """
    if hi < 0:
        hi = len(sorted_collection)
    sorted_collection.insert(bisect.bisect_left(sorted_collection, item, lo, hi), item)
def A(sorted_collection, item, lo=0, hi=-1):
    """Insert ``item`` into the sorted list at the rightmost correct position
    (in place); ``hi < 0`` means the full length.

    The original declared duplicate placeholder parameters (a SyntaxError) and
    called the undefined name ``bisect_right``; it now delegates to the stdlib
    ``bisect`` module (already imported at the top of this file).
    """
    if hi < 0:
        hi = len(sorted_collection)
    sorted_collection.insert(bisect.bisect_right(sorted_collection, item, lo, hi), item)
def A(sorted_collection, item):
    """Iterative binary search: index of ``item`` in the sorted list, or None.

    The original signature declared the same placeholder name for both
    parameters (a SyntaxError); they are restored to the names the body
    already used.
    """
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def A(sorted_collection, item):
    """Binary search via the stdlib ``bisect`` module: index of ``item`` in
    the sorted list, or None when absent.

    The original signature declared the same placeholder name for both
    parameters (a SyntaxError); they are restored to the names the body
    already used.
    """
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None
def A(sorted_collection, item, left, right):
    """Recursive binary search over ``sorted_collection[left:right+1]``:
    index of ``item``, or None when absent.

    The original declared duplicate placeholder parameters (a SyntaxError) and
    recursed via the undefined name ``binary_search_by_recursion``; it now
    recurses via its own bound name.
    """
    if right < left:
        # Empty search window: not found.
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    if sorted_collection[midpoint] > item:
        return A(sorted_collection, item, left, midpoint - 1)
    return A(sorted_collection, item, midpoint + 1, right)
if __name__ == "__main__":
    # Interactive demo: read a sorted list and a target, report its position.
    # NOTE(review): renaming damage — inputs are bound to `_snake_case` while
    # later lines read the original names (`user_input`, `collection`,
    # `target`), and `binary_search` is undefined (the implementations above
    # are all bound to `A`). This block raises NameError as written.
    _snake_case = input("Enter numbers separated by comma:\n").strip()
    _snake_case = sorted(int(item) for item in user_input.split(","))
    _snake_case = int(input("Enter a single number to be found in the list:\n"))
    _snake_case = binary_search(collection, target)
    if result is None:
        print(f'''{target} was not found in {collection}.''')
    else:
        print(f'''{target} was found at position {result} in {collection}.''')
| 36
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure: submodule name -> exported symbol names.
SCREAMING_SNAKE_CASE__ : List[str] = {
    'configuration_luke': ['LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LukeConfig'],
    'tokenization_luke': ['LukeTokenizer'],
}

# Modeling symbols are only exported when torch is installed.
# NOTE(review): this rebinds the same variable as the dict above (renaming
# damage) — originally these were `_import_structure` plus an appended
# `_import_structure["modeling_luke"]` entry.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ : List[str] = [
        'LUKE_PRETRAINED_MODEL_ARCHIVE_LIST',
        'LukeForEntityClassification',
        'LukeForEntityPairClassification',
        'LukeForEntitySpanClassification',
        'LukeForMultipleChoice',
        'LukeForQuestionAnswering',
        'LukeForSequenceClassification',
        'LukeForTokenClassification',
        'LukeForMaskedLM',
        'LukeModel',
        'LukePreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; at runtime the module is lazy.
    from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
    from .tokenization_luke import LukeTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_luke import (
            LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
            LukeForEntityClassification,
            LukeForEntityPairClassification,
            LukeForEntitySpanClassification,
            LukeForMaskedLM,
            LukeForMultipleChoice,
            LukeForQuestionAnswering,
            LukeForSequenceClassification,
            LukeForTokenClassification,
            LukeModel,
            LukePreTrainedModel,
        )
else:
    import sys

    # NOTE(review): the conventional pattern assigns the lazy module to
    # sys.modules[__name__]; here it is bound to a plain variable, and
    # `_import_structure` is undefined (see the rebinding above).
    SCREAMING_SNAKE_CASE__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 48
|
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class UpperCAmelCase_(Protocol):
    """Structural interface for audio filters used by the plotting helpers
    below: anything with a per-sample ``process`` method.

    The original base was the undefined placeholder name ``a``; restored to
    ``typing.Protocol`` (imported at the top of this file) — TODO confirm
    against the upstream source.
    """

    def snake_case__(self, __a):
        """Process one input sample; the default stub returns silence (0.0)."""
        return 0.0
def A(fft_results, samplerate):
    """Display bounds for a frequency-response plot: the (lowest, highest)
    values over the positive-frequency bins, clamped to at least (-20, 20).

    The original signature declared the same placeholder name for both
    parameters (a SyntaxError); they are restored to the names the body
    already used.

    :param fft_results: array of per-bin magnitudes (dB)
    :param samplerate: sample rate; only bins below Nyquist are considered
    :return: (lowest, highest) plot bounds
    """
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest
def A(filter_type, samplerate):
    """Plot the magnitude frequency response (in dB) of ``filter_type`` by
    filtering an impulse and FFT-ing the zero-padded output.

    Fixes from the mechanically-renamed original: duplicate placeholder
    parameters (a SyntaxError), undefined locals (``outputs``), the
    nonexistent ``np.logaa`` (→ ``np.log10``), and a call to the undefined
    sibling ``get_bounds`` (its logic is inlined, since the helper's name was
    rebound by later definitions in this module).

    :param filter_type: object with a per-sample ``process`` method
    :param samplerate: sample rate in Hz (also the padded FFT length)
    """
    size = 512
    # Unit impulse followed by zeros.
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    outputs += [0] * (samplerate - size)  # zero-padding
    fft_db = 20 * np.log10(np.abs(np.fft.fft(outputs)))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    # Display within reasonable bounds (inline of the get_bounds helper).
    lowest = min([-20, np.min(fft_db[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_db[1 : samplerate // 2 - 1])])
    plt.ylim(max([-80, lowest]), min([80, highest]))
    plt.ylabel("Gain (dB)")
    plt.plot(fft_db)
    plt.show()
def A(filter_type, samplerate):
    """Plot the phase response (radians) of ``filter_type`` by filtering an
    impulse and taking the angle of the zero-padded FFT.

    Fixes from the mechanically-renamed original: duplicate placeholder
    parameters (a SyntaxError) and undefined locals (``outputs``).

    :param filter_type: object with a per-sample ``process`` method
    :param samplerate: sample rate in Hz (also the padded FFT length)
    """
    size = 512
    # Unit impulse followed by zeros.
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    outputs += [0] * (samplerate - size)  # zero-padding
    phase = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phase, -2 * pi))
    plt.show()
| 36
| 0
|
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
__snake_case :Any = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
__snake_case :int = typing.Union[np.floataa, int, float] # noqa: UP007
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
return np.sqrt(np.sum((np.asarray(_UpperCAmelCase ) - np.asarray(_UpperCAmelCase )) ** 2 ) )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
return sum((va - va) ** 2 for va, va in zip(_UpperCAmelCase , _UpperCAmelCase ) ) ** (1 / 2)
if __name__ == "__main__":

    def __snake_case ( ):
        """Micro-benchmark the numpy vs pure-Python distance implementations.

        NOTE(review): the timeit statements reference the names
        `euclidean_distance_no_np` / `euclidean_distance`, which do not exist
        under the mechanical renaming (both functions above are bound to
        `__snake_case`) — this benchmark raises NameError as written.
        """
        from timeit import timeit

        print('''Without Numpy''' )
        print(
            timeit(
                '''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=10000 , globals=globals() , ) )
        print('''With Numpy''' )
        print(
            timeit(
                '''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=10000 , globals=globals() , ) )

    benchmark()
| 49
|
def A(bit_count):
    """Gray code sequence for ``bit_count`` bits, as integers.

    The original delegated to ``gray_code_sequence_string``, a sibling whose
    name was mangled by mechanical renaming (NameError); the standard closed
    form ``i ^ (i >> 1)`` produces the identical sequence directly.

    :param bit_count: number of bits (>= 0)
    :return: list of 2**bit_count Gray-code values
    :raises ValueError: when ``bit_count`` is negative
    """
    if bit_count < 0:
        raise ValueError("The given input must be positive")
    # i ^ (i >> 1) is the canonical binary-reflected Gray code of i.
    return [i ^ (i >> 1) for i in range(1 << bit_count)]
def A(bit_count):
    """Gray code sequence for ``bit_count`` bits as binary strings, built by
    the reflect-and-prefix recurrence.

    Fixes from the mechanically-renamed original: the body read the undefined
    name ``bit_count`` (the parameter had been renamed away) and recursed via
    the undefined name ``gray_code_sequence_string``; it now recurses via its
    own bound name.
    """
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    smaller_sequence = A(bit_count - 1)
    # Reflect: "0" + each smaller code, then "1" + each smaller code reversed.
    sequence = ["0" + code for code in smaller_sequence]
    sequence += ["1" + code for code in reversed(smaller_sequence)]
    return sequence
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 36
| 0
|
def SCREAMING_SNAKE_CASE(n):
    """The n-th Fibonacci number with F(2)=1, F(3)=2, ...; returns 0 for
    n == 1 or any non-int input.

    Fixes from the mechanically-renamed original: the body read the undefined
    name ``n`` (the parameter had been renamed away) and tested
    ``isinstance(x, x)`` instead of ``isinstance(n, int)``.
    """
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]
def SCREAMING_SNAKE_CASE(n):
    """Index of the first Fibonacci number with at least ``n`` digits
    (Project Euler 25 helper).

    The original recomputed the whole sequence via the sibling ``fibonacci``,
    whose name was mangled by mechanical renaming (NameError); this version
    iterates the sequence incrementally, preserving the original's indexing
    quirk (its loop starts probing at index 3, so the minimum result is 3).
    """
    # a, b = F(2), F(3) in the original's indexing (F(2)=1, F(3)=2).
    a, b = 1, 2
    index = 3
    while len(str(b)) < n:
        a, b = b, a + b
        index += 1
    return index
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase = 1000 ) -> int:
    """Project Euler 25 entry point: index of the first ``_UpperCAmelCase``-digit
    Fibonacci number.

    NOTE(review): `fibonacci_digits_index` is undefined under the mechanical
    renaming (the helper above is also bound to `SCREAMING_SNAKE_CASE`) —
    this raises NameError as written.
    """
    return fibonacci_digits_index(_UpperCAmelCase )
if __name__ == "__main__":
    # NOTE(review): `solution` is likewise undefined here (the entry point
    # above is bound to `SCREAMING_SNAKE_CASE`).
    print(solution(int(str(input()).strip())))
| 50
|
from PIL import Image
def A(image):
    """Binarize a grayscale PIL-style image around its mean pixel value, in
    place: pixels above the mean become 255, the rest 0.

    Fixes from the mechanically-renamed original: the unpack of
    ``image.size`` and the pixel-access handle were bound to throwaway
    locals, so the loops iterated ``range(image)`` (a TypeError) and read
    undefined names. Indexing follows PIL's convention: ``size`` is
    (width, height) and pixels are addressed as ``pixels[x, y]``, which also
    makes this correct for non-square images.

    :param image: object with ``.size`` -> (width, height) and ``.load()``
        returning an (x, y)-indexable pixel access object of ints
    :return: the same image object, modified in place
    """
    width, height = image.size
    pixels = image.load()
    # First pass: integer mean of all pixels.
    mean = 0
    for y in range(height):
        for x in range(width):
            mean += pixels[x, y]
    mean //= width * height
    # Second pass: threshold every pixel against the mean.
    for y in range(height):
        for x in range(width):
            pixels[x, y] = 255 if pixels[x, y] > mean else 0
    return image
if __name__ == "__main__":
    # Load a grayscale image, threshold it around its mean, and save the result.
    # NOTE(review): renaming damage — the thresholding function above is bound
    # to `A` (so `mean_threshold` is undefined) and the result is bound to
    # `_snake_case` while the save line reads `image`; this block raises
    # NameError as written.
    _snake_case = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
| 36
| 0
|
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
snake_case_ : int = "0.12" # assumed parallelism: 8
@require_flax
@is_staging_test
class __snake_case ( unittest.TestCase ):
    """Hub round-trip tests for Flax models: push a tiny FlaxBertModel to the
    staging Hub (user and org namespaces), reload it, and check every parameter
    survives unchanged.

    NOTE(review): identifiers here look machine-mangled -- all four methods share
    the name `lowerCamelCase` (later defs shadow earlier ones), every result is
    rebound to the same local `UpperCAmelCase_`, and `_snake_case` is used where
    distinct values (token, config, model, diff, ...) were clearly intended.
    Code is kept byte-for-byte; only documentation was added.
    """

    @classmethod
    def lowerCamelCase ( cls : Union[str, Any]):
        """setUpClass: persist the staging auth token so Hub calls authenticate."""
        UpperCAmelCase_ = TOKEN
        HfFolder.save_token(_snake_case)

    @classmethod
    def lowerCamelCase ( cls : Union[str, Any]):
        """tearDownClass: best-effort removal of the repos created by the tests."""
        try:
            delete_repo(token=cls._token , repo_id='''test-model-flax''')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''')
        except HTTPError:
            pass

    def lowerCamelCase ( self : List[str]):
        """Push to the *user* namespace via `push_to_hub`, then via
        `save_pretrained(..., push_to_hub=...)`, reloading and comparing the
        flattened parameters elementwise after each push."""
        UpperCAmelCase_ = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37)
        UpperCAmelCase_ = FlaxBertModel(_snake_case)
        model.push_to_hub('''test-model-flax''' , use_auth_token=self._token)
        UpperCAmelCase_ = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""")
        UpperCAmelCase_ = flatten_dict(unfreeze(model.params))
        UpperCAmelCase_ = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            UpperCAmelCase_ = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(_snake_case , 1e-3 , msg=F"""{key} not identical""")
        # Reset repo
        delete_repo(token=self._token , repo_id='''test-model-flax''')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(_snake_case , repo_id='''test-model-flax''' , push_to_hub=_snake_case , use_auth_token=self._token)
        UpperCAmelCase_ = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""")
        UpperCAmelCase_ = flatten_dict(unfreeze(model.params))
        UpperCAmelCase_ = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            UpperCAmelCase_ = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(_snake_case , 1e-3 , msg=F"""{key} not identical""")

    def lowerCamelCase ( self : Union[str, Any]):
        """Same round-trip as above, but pushing into an *organisation* namespace."""
        UpperCAmelCase_ = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37)
        UpperCAmelCase_ = FlaxBertModel(_snake_case)
        model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token)
        UpperCAmelCase_ = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''')
        UpperCAmelCase_ = flatten_dict(unfreeze(model.params))
        UpperCAmelCase_ = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            UpperCAmelCase_ = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(_snake_case , 1e-3 , msg=F"""{key} not identical""")
        # Reset repo
        delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                _snake_case , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_snake_case , use_auth_token=self._token)
        UpperCAmelCase_ = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''')
        UpperCAmelCase_ = flatten_dict(unfreeze(model.params))
        UpperCAmelCase_ = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            UpperCAmelCase_ = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(_snake_case , 1e-3 , msg=F"""{key} not identical""")
def A (modela , modelb ) -> bool:
    """Return True when two Flax models have numerically identical parameters.

    Flattens both parameter trees and requires sum(|a - b|) <= 1e-4 for every
    tensor. The mangled original declared *both* parameters as ``__A`` (a
    SyntaxError) and compared one model's parameters against themselves, which
    always returned True.
    """
    models_are_equal = True
    flat_params_a = flatten_dict(modela.params )
    flat_params_b = flatten_dict(modelb.params )
    for key in flat_params_a.keys():
        if np.sum(np.abs(flat_params_a[key] - flat_params_b[key] ) ) > 1E-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class __snake_case ( unittest.TestCase ):
    """Tests for loading Flax models stored in a repo *subfolder*: local and
    remote, sharded and unsharded. In every case, loading from the repo root
    must raise, and loading with `subfolder=` must succeed.

    NOTE(review): identifiers are machine-mangled (results rebound to
    `UpperCAmelCase_`, arguments passed as `_snake_case`, all methods named
    `lowerCamelCase`). Code is kept byte-for-byte; only documentation added.
    """

    def lowerCamelCase ( self : Optional[int]):
        """Local save into a subfolder, then reload and compare weights."""
        UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''')
        UpperCAmelCase_ = FlaxBertModel(_snake_case)
        UpperCAmelCase_ = '''bert'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(_snake_case , _snake_case))
            with self.assertRaises(_snake_case):
                UpperCAmelCase_ = FlaxBertModel.from_pretrained(_snake_case)
            UpperCAmelCase_ = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case)
        self.assertTrue(check_models_equal(_snake_case , _snake_case))

    def lowerCamelCase ( self : int):
        """Same as above but saving with `max_shard_size='10KB'` to force a
        sharded checkpoint."""
        UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''')
        UpperCAmelCase_ = FlaxBertModel(_snake_case)
        UpperCAmelCase_ = '''bert'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(_snake_case , _snake_case) , max_shard_size='''10KB''')
            with self.assertRaises(_snake_case):
                UpperCAmelCase_ = FlaxBertModel.from_pretrained(_snake_case)
            UpperCAmelCase_ = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case)
        self.assertTrue(check_models_equal(_snake_case , _snake_case))

    def lowerCamelCase ( self : List[Any]):
        """Remote Hub repo with the checkpoint stored in a subfolder."""
        UpperCAmelCase_ = '''bert'''
        UpperCAmelCase_ = '''hf-internal-testing/tiny-random-bert-subfolder'''
        with self.assertRaises(_snake_case):
            UpperCAmelCase_ = FlaxBertModel.from_pretrained(_snake_case)
        UpperCAmelCase_ = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case)
        self.assertIsNotNone(_snake_case)

    def lowerCamelCase ( self : Optional[int]):
        """Remote *sharded* checkpoint stored in a subfolder."""
        UpperCAmelCase_ = '''bert'''
        UpperCAmelCase_ = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
        with self.assertRaises(_snake_case):
            UpperCAmelCase_ = FlaxBertModel.from_pretrained(_snake_case)
        UpperCAmelCase_ = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case)
        self.assertIsNotNone(_snake_case)
| 51
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class UpperCAmelCase_ ( a):
    """Configuration class for Wav2Vec2 models: hyper-parameters for the conv
    feature extractor, transformer encoder, SpecAugment masking, codevector
    quantizer, CTC head, adapter, and the SequenceClassification/XVector heads.

    NOTE(review): this class is machine-mangled and not runnable as-is -- the
    `__init__` declares every parameter as `__a` (duplicate argument names are a
    SyntaxError), and the body binds values to the local `_lowerCAmelCase`
    instead of `self.<attr>` (e.g. `self.conv_dim`, read by `len(self.conv_dim)`
    below, is never assigned). Kept byte-for-byte; documentation only.
    """
    lowerCamelCase__ = 'wav2vec2'

    def __init__( self, __a=32, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.1, __a=0.1, __a=0.1, __a=0.0, __a=0.0, __a=0.1, __a=0.1, __a=0.02, __a=1E-5, __a="group", __a="gelu", __a=(512, 512, 512, 512, 512, 512, 512), __a=(5, 2, 2, 2, 2, 2, 2), __a=(10, 3, 3, 3, 3, 2, 2), __a=False, __a=128, __a=16, __a=False, __a=True, __a=0.05, __a=10, __a=2, __a=0.0, __a=10, __a=0, __a=320, __a=2, __a=0.1, __a=100, __a=256, __a=256, __a=0.1, __a="sum", __a=False, __a=False, __a=256, __a=(512, 512, 512, 512, 1500), __a=(5, 3, 3, 1, 1), __a=(1, 2, 3, 1, 1), __a=512, __a=0, __a=1, __a=2, __a=False, __a=3, __a=2, __a=3, __a=None, __a=None, **__a, ):
        """Store configuration values; presumably mirrors the upstream
        `Wav2Vec2Config.__init__` parameter list -- confirm against it."""
        super().__init__(**__a, pad_token_id=__a, bos_token_id=__a, eos_token_id=__a)
        # Encoder / feature-extractor hyper-parameters.
        _lowerCAmelCase : str = hidden_size
        _lowerCAmelCase : Optional[int] = feat_extract_norm
        _lowerCAmelCase : Union[str, Any] = feat_extract_activation
        _lowerCAmelCase : Optional[Any] = list(__a)
        _lowerCAmelCase : List[str] = list(__a)
        _lowerCAmelCase : str = list(__a)
        _lowerCAmelCase : List[str] = conv_bias
        _lowerCAmelCase : str = num_conv_pos_embeddings
        _lowerCAmelCase : List[Any] = num_conv_pos_embedding_groups
        _lowerCAmelCase : str = len(self.conv_dim)
        _lowerCAmelCase : List[str] = num_hidden_layers
        _lowerCAmelCase : str = intermediate_size
        _lowerCAmelCase : Any = hidden_act
        _lowerCAmelCase : int = num_attention_heads
        _lowerCAmelCase : Optional[Any] = hidden_dropout
        _lowerCAmelCase : List[str] = attention_dropout
        _lowerCAmelCase : Tuple = activation_dropout
        _lowerCAmelCase : int = feat_proj_dropout
        _lowerCAmelCase : List[str] = final_dropout
        _lowerCAmelCase : int = layerdrop
        _lowerCAmelCase : int = layer_norm_eps
        _lowerCAmelCase : Union[str, Any] = initializer_range
        _lowerCAmelCase : str = vocab_size
        _lowerCAmelCase : Optional[Any] = do_stable_layer_norm
        _lowerCAmelCase : Any = use_weighted_layer_sum
        # The three conv layer specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        _lowerCAmelCase : str = apply_spec_augment
        _lowerCAmelCase : Optional[Any] = mask_time_prob
        _lowerCAmelCase : Optional[int] = mask_time_length
        _lowerCAmelCase : List[str] = mask_time_min_masks
        _lowerCAmelCase : Optional[int] = mask_feature_prob
        _lowerCAmelCase : Optional[int] = mask_feature_length
        _lowerCAmelCase : List[str] = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        _lowerCAmelCase : Union[str, Any] = num_codevectors_per_group
        _lowerCAmelCase : str = num_codevector_groups
        _lowerCAmelCase : Optional[int] = contrastive_logits_temperature
        _lowerCAmelCase : Optional[int] = feat_quantizer_dropout
        _lowerCAmelCase : Optional[int] = num_negatives
        _lowerCAmelCase : Union[str, Any] = codevector_dim
        _lowerCAmelCase : Any = proj_codevector_dim
        _lowerCAmelCase : Optional[int] = diversity_loss_weight
        # ctc loss
        _lowerCAmelCase : Tuple = ctc_loss_reduction
        _lowerCAmelCase : Tuple = ctc_zero_infinity
        # adapter
        _lowerCAmelCase : List[Any] = add_adapter
        _lowerCAmelCase : List[str] = adapter_kernel_size
        _lowerCAmelCase : str = adapter_stride
        _lowerCAmelCase : List[str] = num_adapter_layers
        _lowerCAmelCase : str = output_hidden_size or hidden_size
        _lowerCAmelCase : Tuple = adapter_attn_dim
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        _lowerCAmelCase : str = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        _lowerCAmelCase : str = list(__a)
        _lowerCAmelCase : Union[str, Any] = list(__a)
        _lowerCAmelCase : List[str] = list(__a)
        _lowerCAmelCase : Tuple = xvector_output_dim

    @property
    def snake_case__ ( self):
        """Total hop length of the conv feature extractor (product of all strides)."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 36
| 0
|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def A_ ( tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ):
    """Tokenize a single text line with the given tokenizer.

    Sets the tokenizer's padding side, optionally pads to `max_length`, and
    returns the tokenizer's BatchEncoding. BART tokenizers get
    `add_prefix_space=True` when the line does not already start with a space.

    The mangled original declared every parameter as `_lowerCAmelCase`
    (duplicate argument names are a SyntaxError) and dropped the
    `tokenizer.padding_side` assignment.
    """
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(" " ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding="max_length" if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def A_ ( input_ids , pad_token_id , attention_mask=None , ):
    """Drop trailing columns of a batch that contain only `pad_token_id`.

    Keeps every column where at least one row is non-pad. Returns the trimmed
    `input_ids`, or a `(input_ids, attention_mask)` pair when a mask is given.

    The mangled original declared all parameters as `_lowerCAmelCase`
    (duplicate argument names are a SyntaxError).
    """
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class A__ ( __snake_case ):
    """Line-oriented seq2seq dataset: reads `<type_path>.source` / `.target`
    with linecache, tokenizes (source, target) pairs lazily in `__getitem__`,
    and collates batches with pad-column trimming.

    NOTE(review): the method signatures below declare several parameters all
    named `A_` -- duplicate argument names are a SyntaxError, so this is
    machine-mangled renamer output (presumably the upstream `Seq2SeqDataset`
    with (tokenizer, data_dir, max_source_length, max_target_length, type_path,
    n_obs, src_lang, tgt_lang, prefix) -- confirm against upstream). Kept
    byte-for-byte; documentation only.
    """
    def __init__( self , A_ , A_ , A_ , A_ , A_="train" , A_=None , A_=None , A_=None , A_="" , ):
        """Resolve the source/target file paths, record lengths and limits."""
        super().__init__()
        UpperCamelCase : str = Path(A_ ).joinpath(type_path + ".source" )
        UpperCamelCase : Tuple = Path(A_ ).joinpath(type_path + ".target" )
        UpperCamelCase : Tuple = self.get_char_lens(self.src_file )
        UpperCamelCase : Optional[Any] = max_source_length
        UpperCamelCase : List[str] = max_target_length
        # An empty source line would silently produce a degenerate example.
        assert min(self.src_lens ) > 0, F"""found empty line in {self.src_file}"""
        UpperCamelCase : Any = tokenizer
        UpperCamelCase : Any = prefix
        if n_obs is not None:
            UpperCamelCase : Dict = self.src_lens[:n_obs]
        UpperCamelCase : List[Any] = src_lang
        UpperCamelCase : Optional[int] = tgt_lang

    def __len__( self ):
        """Number of examples (possibly truncated by `n_obs`)."""
        return len(self.src_lens )

    def __getitem__( self , A_ ):
        """Read the index-th source/target lines, tokenize both, and return a
        dict of squeezed input_ids / attention_mask / decoder_input_ids."""
        UpperCamelCase : Optional[Any] = index + 1  # linecache starts at 1
        UpperCamelCase : Dict = self.prefix + linecache.getline(str(self.src_file ) , A_ ).rstrip("\n" )
        UpperCamelCase : Dict = linecache.getline(str(self.tgt_file ) , A_ ).rstrip("\n" )
        assert source_line, F"""empty source line for index {index}"""
        assert tgt_line, F"""empty tgt line for index {index}"""
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , A_ ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        UpperCamelCase : str = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , A_ ) else self.tokenizer
        )
        UpperCamelCase : Any = self.tokenizer.generator if isinstance(self.tokenizer , A_ ) else self.tokenizer
        UpperCamelCase : Dict = encode_line(A_ , A_ , self.max_source_length , "right" )
        UpperCamelCase : Optional[int] = encode_line(A_ , A_ , self.max_target_length , "right" )
        UpperCamelCase : List[Any] = source_inputs["input_ids"].squeeze()
        UpperCamelCase : Dict = target_inputs["input_ids"].squeeze()
        UpperCamelCase : str = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def __UpperCamelCase( A_ ):
        """Character length of every line in the given file."""
        return [len(A_ ) for x in Path(A_ ).open().readlines()]

    def __UpperCamelCase( self , A_ ):
        """Collate a list of examples into stacked tensors, trimming columns
        that are entirely padding (see `trim_batch` above)."""
        UpperCamelCase : List[Any] = torch.stack([x["input_ids"] for x in batch] )
        UpperCamelCase : Tuple = torch.stack([x["attention_mask"] for x in batch] )
        UpperCamelCase : List[str] = torch.stack([x["decoder_input_ids"] for x in batch] )
        UpperCamelCase : List[Any] = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , A_ )
            else self.tokenizer.pad_token_id
        )
        UpperCamelCase : Optional[int] = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , A_ )
            else self.tokenizer.pad_token_id
        )
        UpperCamelCase : str = trim_batch(A_ , A_ )
        UpperCamelCase , UpperCamelCase : Optional[Any] = trim_batch(A_ , A_ , attention_mask=A_ )
        UpperCamelCase : Optional[int] = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
__lowerCamelCase : List[Any] = getLogger(__name__)
def A_ ( _lowerCAmelCase ) -> List[Any]:
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    flattened = []
    for sub in _lowerCAmelCase:
        flattened.extend(sub)
    return flattened
def A_ ( _lowerCAmelCase ) -> None:
    """Collect git metadata for the current repo and write it to
    `<folder>/git_log.json`."""
    repo_info = get_git_info()
    save_json(repo_info , os.path.join(_lowerCAmelCase , "git_log.json" ) )
def A_ ( content , path , indent=4 , **json_dump_kwargs ):
    """Serialize `content` as JSON to `path` (extra kwargs go to `json.dump`).

    The mangled original declared all parameters as `_lowerCAmelCase`
    (duplicate argument names are a SyntaxError).
    """
    with open(path , "w" ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def A_ ( _lowerCAmelCase ):
    """Load and return the JSON document stored at the given path.

    The original passed the *path string* to `json.load` instead of the open
    file handle, which can never succeed.
    """
    with open(_lowerCAmelCase ) as f:
        return json.load(f )
def A_ ( ):
    """Return a dict describing the current git checkout (repo id, HEAD sha,
    active branch, hostname).

    The original passed the undefined name `_lowerCAmelCase` as
    `search_parent_directories`; `True` is the only sensible value here so the
    repo is found from any working directory inside it.
    """
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        "repo_id": str(repo ),
        "repo_sha": str(repo.head.object.hexsha ),
        "repo_branch": str(repo.active_branch ),
        "hostname": str(socket.gethostname() ),
    }
    return repo_infos
def A_ ( f , x ) -> List:
    """Eager map: apply `f` to every element of `x` and return a list.

    The mangled original declared both parameters as `_lowerCAmelCase`
    (duplicate argument names are a SyntaxError).
    """
    return list(map(f , x ) )
def A_ ( obj , path ):
    """Pickle `obj` to the file at `path`.

    The mangled original declared both parameters as `_lowerCAmelCase` (a
    SyntaxError) and passed the same name as both the object and the stream.
    """
    with open(path , "wb" ) as f:
        return pickle.dump(obj , f )
def A_ ( _lowerCAmelCase ):
    """Normalise an answer string for QA metrics (SQuAD-style): lowercase,
    strip punctuation, drop English articles (a/an/the), collapse whitespace.

    The mangled original's inner helpers read an undefined name `text`. The
    broken `-> Union[str, Any]` annotation (Union is not imported) was dropped.
    """

    def remove_articles(text ):
        return re.sub(r"\b(a|an|the)\b" , " " , text )

    def white_space_fix(text ):
        return " ".join(text.split() )

    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower(text ):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(_lowerCAmelCase ) ) ) )
def A_ ( prediction , ground_truth ):
    """Token-level F1 between two answer strings after normalisation
    (SQuAD-style). Returns 0 when the answers share no tokens.

    NOTE(review): `normalize_answer` is not defined under that name in this
    file -- the normaliser above was also renamed to `A_`; confirm the intended
    target. The mangled original declared both parameters as `_lowerCAmelCase`
    (duplicate argument names are a SyntaxError).
    """
    prediction_tokens = normalize_answer(prediction ).split()
    ground_truth_tokens = normalize_answer(ground_truth ).split()
    # Multiset intersection counts each shared token at most min(count) times.
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def A_ ( prediction , ground_truth ):
    """Exact-match after normalisation (SQuAD-style).

    NOTE(review): `normalize_answer` is not defined under that name here (the
    normaliser above was renamed to `A_`); confirm the target. The mangled
    original declared both parameters as `_lowerCAmelCase` (a SyntaxError).
    """
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def A_ ( output_lns , reference_lns ) -> Dict:
    """Mean exact-match over paired prediction/reference lines.

    Returns `{"em": fraction}`; empty inputs give `{"em": 0}`. The mangled
    original declared both parameters as `_lowerCAmelCase` (a SyntaxError).
    """
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def A_ ( _lowerCAmelCase ) -> bool:
    """Return True if the model prefix denotes a RAG model (starts with "rag").

    The original read an undefined module-level name `model_prefix` instead of
    its own parameter (and was annotated `-> str` despite returning a bool).
    """
    return _lowerCAmelCase.startswith("rag" )
def A_ ( extra_params , hparams , config ):
    """Move each hyper-parameter named in `extra_params` from `hparams` onto
    `config`, translating names that differ between model families
    (T5 uses `dropout_rate` where others use `dropout`). Parameters the config
    cannot accept are dropped with a log message. Returns the mutated
    `(hparams, config)` pair.

    The mangled original declared all three parameters as `_lowerCAmelCase`
    (a SyntaxError) and lost the `equivalent_param["dropout"]` key assignment,
    leaving the translation table an identity map.
    """
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info("config doesn't have a `{}` attribute".format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
| 52
|
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    'The RoBERTa Model transformer with early exiting (DeeRoBERTa). ' , a , )
class UpperCAmelCase_ ( a):
    """DeeBERT backbone specialised for RoBERTa: reuses the DeeBertModel
    machinery but swaps in RobertaConfig and RobertaEmbeddings.

    NOTE(review): both class attributes are bound to the same mangled name
    `lowerCamelCase__` (presumably `config_class` and `base_model_prefix`);
    kept byte-for-byte."""
    lowerCamelCase__ = RobertaConfig
    lowerCamelCase__ = 'roberta'

    def __init__( self, __a):
        """Build the model from the config `__a` and initialise weights."""
        super().__init__(__a)
        # Replace the BERT embeddings with RoBERTa embeddings.
        _lowerCAmelCase : Optional[Any] = RobertaEmbeddings(__a)
        self.init_weights()
@add_start_docstrings(
    'RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n    also takes care of multi-layer training. ' , a , )
class UpperCAmelCase_ ( a):
    """DeeRoBERTa sequence classifier: a DeeRobertaModel backbone plus a linear
    head, with per-layer "highway" exits whose losses can be trained jointly.

    NOTE(review): locals are machine-mangled (`_lowerCAmelCase` rebound
    throughout, later reads via other names); kept byte-for-byte."""
    lowerCamelCase__ = RobertaConfig
    lowerCamelCase__ = 'roberta'

    def __init__( self, __a):
        """Create backbone, dropout and classification head from config `__a`."""
        super().__init__(__a)
        _lowerCAmelCase : Optional[int] = config.num_labels
        _lowerCAmelCase : Optional[int] = config.num_hidden_layers
        _lowerCAmelCase : Optional[int] = DeeRobertaModel(__a)
        _lowerCAmelCase : Union[str, Any] = nn.Dropout(config.hidden_dropout_prob)
        _lowerCAmelCase : List[str] = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(__a)
    def snake_case__ ( self, __a=None, __a=None, __a=None, __a=None, __a=None, __a=None, __a=None, __a=-1, __a=False, ):
        """Forward pass with early-exit handling.

        A HighwayException raised inside the backbone means an intermediate
        layer exited early; its payload replaces the full-depth outputs.
        Regression (num_labels == 1) uses MSE, classification uses CE; highway
        exit losses are accumulated per layer and summed when training highways.
        """
        _lowerCAmelCase : Union[str, Any] = self.num_layers
        try:
            _lowerCAmelCase : List[Any] = self.roberta(
                __a, attention_mask=__a, token_type_ids=__a, position_ids=__a, head_mask=__a, inputs_embeds=__a, )
            _lowerCAmelCase : List[Any] = outputs[1]
            _lowerCAmelCase : Dict = self.dropout(__a)
            _lowerCAmelCase : Dict = self.classifier(__a)
            _lowerCAmelCase : Optional[Any] = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # An intermediate layer exited early: recover its outputs/layer index.
            _lowerCAmelCase : Tuple = e.message
            _lowerCAmelCase : Union[str, Any] = e.exit_layer
            _lowerCAmelCase : List[Any] = outputs[0]
        if not self.training:
            _lowerCAmelCase : int = entropy(__a)
            _lowerCAmelCase : List[Any] = []
            _lowerCAmelCase : str = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                _lowerCAmelCase : Optional[Any] = MSELoss()
                _lowerCAmelCase : int = loss_fct(logits.view(-1), labels.view(-1))
            else:
                _lowerCAmelCase : Optional[Any] = CrossEntropyLoss()
                _lowerCAmelCase : Optional[Any] = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            # work with highway exits
            _lowerCAmelCase : Optional[int] = []
            for highway_exit in outputs[-1]:
                _lowerCAmelCase : Any = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(__a)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    _lowerCAmelCase : List[str] = MSELoss()
                    _lowerCAmelCase : List[Any] = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    _lowerCAmelCase : Dict = CrossEntropyLoss()
                    _lowerCAmelCase : Optional[Any] = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(__a)
            if train_highway:
                _lowerCAmelCase : int = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                _lowerCAmelCase : Any = (loss,) + outputs
        if not self.training:
            _lowerCAmelCase : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                _lowerCAmelCase : Optional[Any] = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
| 36
| 0
|
'''simple docstring'''
def lowercase__ ( __lowercase : int ) -> list[int]:
    """Return all primes up to and including `__lowercase` using the Sieve of
    Eratosthenes.

    Raises ValueError for non-positive input. The mangled original read an
    undefined name `num` throughout and used the input bound (instead of the
    prime `p`) as the marking step, which would skip most composites.
    """
    if __lowercase <= 0:
        raise ValueError('Input must be a positive integer' )
    is_prime = [True] * (__lowercase + 1)
    p = 2
    while p * p <= __lowercase:
        if is_prime[p]:
            # Mark every multiple of p, starting at p*p (smaller multiples were
            # already marked by smaller primes).
            for multiple in range(p * p , __lowercase + 1 , p ):
                is_prime[multiple] = False
        p += 1
    return [n for n in range(2 , __lowercase + 1 ) if is_prime[n]]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # The sieve above is named `lowercase__`; the original called the undefined
    # names `prime_sieve_eratosthenes` and `user_num`.
    a__ : int = int(input('''Enter a positive integer: ''').strip())
    print(lowercase__(a__))
| 53
|
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
    """Composite configuration for vision encoder-decoder models, built from
    nested `encoder` and `decoder` sub-configurations.

    NOTE(review): machine-mangled and not behaviourally correct as written --
    both `AutoConfig.for_model` calls receive `__a` (the kwargs dict) rather
    than the popped model types and sub-config dicts, and `to_dict` rebinds one
    local instead of filling keys of the output dict. Both class attributes
    also share the mangled name `lowerCamelCase__` (presumably `model_type`
    and `is_composition`). Kept byte-for-byte; documentation only."""
    lowerCamelCase__ = 'vision-encoder-decoder'
    lowerCamelCase__ = True

    def __init__( self, **__a):
        """Require `encoder` and `decoder` sub-config dicts in kwargs and build
        the two sub-configurations from them."""
        super().__init__(**__a)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuraton of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}")
        _lowerCAmelCase : str = kwargs.pop("encoder")
        _lowerCAmelCase : Any = encoder_config.pop("model_type")
        _lowerCAmelCase : str = kwargs.pop("decoder")
        _lowerCAmelCase : List[str] = decoder_config.pop("model_type")
        _lowerCAmelCase : Optional[Any] = AutoConfig.for_model(__a, **__a)
        _lowerCAmelCase : Optional[Any] = AutoConfig.for_model(__a, **__a)
        _lowerCAmelCase : Optional[int] = True

    @classmethod
    def snake_case__ ( cls, __a, __a, **__a):
        """Alternate constructor from two already-built configs; marks the
        decoder as a cross-attending decoder first."""
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        _lowerCAmelCase : Optional[Any] = True
        _lowerCAmelCase : str = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **__a)

    def snake_case__ ( self):
        """Serialize this config (and both sub-configs) to a plain dict."""
        _lowerCAmelCase : int = copy.deepcopy(self.__dict__)
        _lowerCAmelCase : List[str] = self.encoder.to_dict()
        _lowerCAmelCase : List[str] = self.decoder.to_dict()
        _lowerCAmelCase : Any = self.__class__.model_type
        return output
class UpperCAmelCase_ ( a):
    """ONNX export config for the *encoder* half: `pixel_values` in,
    `last_hidden_state` out.

    NOTE(review): all three properties share the mangled name `snake_case__`,
    so only the last definition survives on the class (presumably they were
    `inputs`, `atol_for_validation`, `outputs`). Kept byte-for-byte."""
    lowerCamelCase__ = version.parse('1.11')

    @property
    def snake_case__ ( self):
        """Input dynamic axes: pixel_values -> (batch, num_channels, height, width)."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def snake_case__ ( self):
        """Absolute tolerance used when validating the exported model."""
        return 1E-4

    @property
    def snake_case__ ( self):
        """Output dynamic axes: last_hidden_state -> (batch, encoder_sequence)."""
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})
class UpperCAmelCase_ ( a):
    """ONNX export config for the *decoder* half: builds dummy inputs by
    delegating to the parent text dummy-input generator and replacing
    `input_ids` with zero-filled `encoder_hidden_states`.

    NOTE(review): machine-mangled -- the first property binds all three axis
    specs to one local and returns an undefined `common_inputs`, and
    `generate_dummy_inputs` declares several parameters all named `__a`
    (duplicate argument names are a SyntaxError). Kept byte-for-byte."""

    @property
    def snake_case__ ( self):
        """Dynamic-axis spec for input_ids / attention_mask / encoder_hidden_states."""
        _lowerCAmelCase : Optional[Any] = OrderedDict()
        _lowerCAmelCase : Any = {0: "batch", 1: "past_decoder_sequence + sequence"}
        _lowerCAmelCase : List[str] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        _lowerCAmelCase : Optional[Any] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def snake_case__ ( self, __a, __a = -1, __a = -1, __a = False, __a = None, ):
        """Build dummy ONNX inputs: take the parent's text dummy inputs, then
        swap `input_ids` for zero `encoder_hidden_states` of the encoder's
        hidden size."""
        import torch

        _lowerCAmelCase : Optional[Any] = OrderedDict()
        _lowerCAmelCase : List[str] = super().generate_dummy_inputs(
            __a, batch_size=__a, seq_length=__a, is_pair=__a, framework=__a)
        _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = dummy_input["input_ids"].shape
        _lowerCAmelCase : str = (batch, encoder_sequence, self._config.encoder_hidden_size)
        _lowerCAmelCase : List[str] = dummy_input.pop("input_ids")
        _lowerCAmelCase : List[str] = dummy_input.pop("attention_mask")
        _lowerCAmelCase : Optional[int] = torch.zeros(__a)
        return common_inputs
class UpperCAmelCase_ ( a):
    """Top-level vision-encoder-decoder ONNX config: dispatches to the encoder
    and decoder ONNX configs defined above.

    NOTE(review): all three methods share the mangled name `snake_case__`, and
    the last one declares duplicate `__a` parameters (a SyntaxError) and reads
    an undefined `encoder_config`. Kept byte-for-byte."""

    @property
    def snake_case__ ( self):
        """No inputs are declared at this composite level."""
        pass

    def snake_case__ ( self, __a):
        """Build the encoder ONNX config from the encoder model config."""
        return VisionEncoderDecoderEncoderOnnxConfig(__a)

    def snake_case__ ( self, __a, __a, __a = "default"):
        """Build the decoder ONNX config (it needs the encoder's hidden size)."""
        _lowerCAmelCase : Dict = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(__a, __a)
| 36
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCamelCase_ ( UpperCamelCase):
    """BLIP processor: wraps a BlipImageProcessor and a BERT tokenizer behind a
    single `__call__` that accepts images, text, or both.

    NOTE(review): the three class attributes all share the mangled name
    `snake_case__` (presumably `attributes`, `image_processor_class`,
    `tokenizer_class`), so only the last assignment survives. Kept
    byte-for-byte; documentation only."""
    snake_case__ : List[str] = ["image_processor", "tokenizer"]
    snake_case__ : Any = "BlipImageProcessor"
    snake_case__ : Any = ("BertTokenizer", "BertTokenizerFast")

    def __init__( self : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any ) -> Any:
        """Store the image processor and tokenizer on the mixin."""
        __SCREAMING_SNAKE_CASE = False
        super().__init__(UpperCAmelCase__ , UpperCAmelCase__ )
        __SCREAMING_SNAKE_CASE = self.image_processor

    def __call__( self : Union[str, Any] , UpperCAmelCase__ : ImageInput = None , UpperCAmelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase__ : Union[bool, str, TruncationStrategy] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , **UpperCAmelCase__ : Union[str, Any] , ) -> BatchEncoding:
        """Process images and/or text: text-only returns the tokenizer output;
        otherwise the image features are computed and the text encoding (if
        any) is merged into them."""
        if images is None and text is None:
            raise ValueError("You have to specify either images or text." )
        # Get only text
        if images is None:
            __SCREAMING_SNAKE_CASE = self.tokenizer
            __SCREAMING_SNAKE_CASE = self.tokenizer(
                text=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , stride=UpperCAmelCase__ , pad_to_multiple_of=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , return_overflowing_tokens=UpperCAmelCase__ , return_special_tokens_mask=UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_length=UpperCAmelCase__ , verbose=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ , )
            return text_encoding
        # add pixel_values
        __SCREAMING_SNAKE_CASE = self.image_processor(UpperCAmelCase__ , return_tensors=UpperCAmelCase__ )
        if text is not None:
            __SCREAMING_SNAKE_CASE = self.tokenizer(
                text=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , stride=UpperCAmelCase__ , pad_to_multiple_of=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , return_overflowing_tokens=UpperCAmelCase__ , return_special_tokens_mask=UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_length=UpperCAmelCase__ , verbose=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ , )
        else:
            __SCREAMING_SNAKE_CASE = None
        if text_encoding is not None:
            encoding_image_processor.update(UpperCAmelCase__ )
        return encoding_image_processor

    def UpperCAmelCase_ ( self : str , *UpperCAmelCase__ : str , **UpperCAmelCase__ : List[Any] ) -> List[str]:
        """Forward `batch_decode` to the tokenizer."""
        return self.tokenizer.batch_decode(*UpperCAmelCase__ , **UpperCAmelCase__ )

    def UpperCAmelCase_ ( self : str , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : Tuple ) -> List[str]:
        """Forward `decode` to the tokenizer."""
        return self.tokenizer.decode(*UpperCAmelCase__ , **UpperCAmelCase__ )

    @property
    def UpperCAmelCase_ ( self : Dict ) -> Union[str, Any]:
        """Union of tokenizer and image-processor model input names (de-duplicated,
        order preserved)."""
        __SCREAMING_SNAKE_CASE = self.tokenizer.model_input_names
        __SCREAMING_SNAKE_CASE = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 54
|
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class UpperCAmelCase_ ( a):
    """Descriptor that behaves like `property` but caches the computed value on
    the instance under `__cached_<fget name>` after the first access.

    NOTE(review): the base class `a` is presumably `property` (the code relies
    on `self.fget`); confirm against the original import block.
    """

    def __get__( self, obj, objtype=None):
        """Return the cached value, computing and storing it on first access.

        The mangled original declared both descriptor parameters as `__a`
        (duplicate argument names are a SyntaxError); restored to the
        conventional descriptor signature.
        """
        if obj is None:
            # Accessed on the class rather than an instance.
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def A ( _lowerCamelCase ):
    """Convert a truthy/falsy string ("yes"/"no", "on"/"off", "1"/"0", ...) to
    1 or 0, case-insensitively; raise ValueError for anything unrecognised.

    The mangled original lowered into a local it never used and compared an
    undefined name `val` instead, so e.g. "True" would raise NameError.
    """
    val = _lowerCamelCase.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(F"invalid truth value {val!r}" )
def A ( _lowerCamelCase ):
    """Return True if the argument is a tensor of any available framework:
    a torch.fx proxy, a torch.Tensor, a tf.Tensor, a jax ndarray/Tracer, or a
    numpy ndarray. Frameworks are imported lazily and only when available, and
    checked in that fixed order."""
    if is_torch_fx_proxy(_lowerCamelCase ):
        return True
    if is_torch_available():
        import torch

        if isinstance(_lowerCamelCase , torch.Tensor ):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(_lowerCamelCase , tf.Tensor ):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(_lowerCamelCase , (jnp.ndarray, Tracer) ):
            return True
    # Fallback: plain numpy arrays also count as tensors.
    return isinstance(_lowerCamelCase , np.ndarray )
def A ( _lowerCamelCase ):
    """Return True iff the argument is a numpy ndarray."""
    return isinstance(_lowerCamelCase , np.ndarray )
def A ( _lowerCamelCase ):
    """Public numpy-array check.

    NOTE(review): `_is_numpy` is not defined under that name in this file --
    the helper above was also renamed to `A`; confirm the intended target."""
    return _is_numpy(_lowerCamelCase )
def A ( _lowerCamelCase ):
    """Return True iff the argument is a torch.Tensor (torch imported lazily,
    so this must only be called when torch is known to be available)."""
    import torch

    return isinstance(_lowerCamelCase , torch.Tensor )
def A ( _lowerCamelCase ):
    """Safe torch-tensor check: False when torch is not installed.

    NOTE(review): `_is_torch` is not defined under that name here (the helper
    above was also renamed to `A`); confirm the intended target."""
    return False if not is_torch_available() else _is_torch(_lowerCamelCase )
def A ( _lowerCamelCase ):
    """Return True iff the argument is a torch.device (torch imported lazily)."""
    import torch

    return isinstance(_lowerCamelCase , torch.device )
def A ( _lowerCamelCase ):
    """Safe torch-device check: False when torch is not installed.

    NOTE(review): `_is_torch_device` is not defined under that name here (the
    helper above was also renamed to `A`); confirm the intended target."""
    return False if not is_torch_available() else _is_torch_device(_lowerCamelCase )
def A ( _lowerCamelCase ):
    """Return True if the argument is a torch.dtype, or the *name* of one
    (e.g. "float32", which is resolved to the torch attribute first).

    The mangled original called `isinstance(x, x)` / `hasattr(x, x)`, which
    can never work; restored so string dtype names resolve via `torch`.
    """
    import torch

    if isinstance(_lowerCamelCase , str ):
        # Resolve names like "float32" to the torch attribute; unknown names
        # are simply not dtypes.
        if not hasattr(torch , _lowerCamelCase ):
            return False
        _lowerCamelCase = getattr(torch , _lowerCamelCase )
    return isinstance(_lowerCamelCase , torch.dtype )
def A ( _lowerCamelCase ):
    """Public wrapper: test for a torch dtype without importing torch eagerly."""
    if not is_torch_available():
        return False
    return _is_torch_dtype(_lowerCamelCase )
def A ( _lowerCamelCase ):
    """Check whether the argument is a tf.Tensor (TensorFlow assumed importable)."""
    import tensorflow as tf

    is_tf = isinstance(_lowerCamelCase , tf.Tensor )
    return is_tf
def A ( _lowerCamelCase ):
    """Public wrapper: test for a TF tensor without importing TF eagerly."""
    if not is_tf_available():
        return False
    return _is_tensorflow(_lowerCamelCase )
def A ( _lowerCamelCase ):
    """Check whether the argument is a symbolic (graph-mode) TF tensor."""
    import tensorflow as tf

    # `tf.is_symbolic_tensor` only exists starting with TF 2.14; fall back
    # to an exact type check on older versions.
    checker = getattr(tf , "is_symbolic_tensor" , None )
    if checker is not None:
        return checker(_lowerCamelCase )
    return type(_lowerCamelCase ) == tf.Tensor
def A ( _lowerCamelCase ):
    """Public wrapper: test for a symbolic TF tensor without importing TF eagerly."""
    if not is_tf_available():
        return False
    return _is_tf_symbolic_tensor(_lowerCamelCase )
def A ( _lowerCamelCase ):
    """Check whether the argument is a JAX ndarray (jax assumed importable)."""
    import jax.numpy as jnp  # noqa: F811

    is_jax_array = isinstance(_lowerCamelCase , jnp.ndarray )
    return is_jax_array
def A ( _lowerCamelCase ):
    """Public wrapper: test for a JAX array without importing jax eagerly."""
    if not is_flax_available():
        return False
    return _is_jax(_lowerCamelCase )
def A ( _lowerCamelCase ):
    """Recursively convert tensors, arrays and containers to plain Python objects.

    Dicts map to dicts, lists/tuples to lists, framework tensors and numpy
    values to (nested) Python lists/scalars; anything else is returned as-is.
    """
    # Fixed: the original recursed by passing the *whole* container instead
    # of each value, and read the undefined name `obj`. The recursion goes
    # through a private helper because the public name is rebound later in
    # this module.
    def _to_py_obj(obj ):
        if isinstance(obj , (dict, UserDict) ):
            return {k: _to_py_obj(v ) for k, v in obj.items()}
        elif isinstance(obj , (list, tuple) ):
            return [_to_py_obj(o ) for o in obj]
        elif is_tf_tensor(obj ):
            return obj.numpy().tolist()
        elif is_torch_tensor(obj ):
            return obj.detach().cpu().tolist()
        elif is_jax_tensor(obj ):
            return np.asarray(obj ).tolist()
        elif isinstance(obj , (np.ndarray, np.number) ):  # tolist also works on 0d np arrays
            return obj.tolist()
        else:
            return obj

    return _to_py_obj(_lowerCamelCase )
def A ( _lowerCamelCase ):
    """Recursively convert tensors and containers to numpy arrays.

    Dicts are converted value-wise, lists/tuples become np.ndarray, framework
    tensors are converted to numpy; other objects are returned unchanged.
    """
    # Fixed: recursion previously passed the whole container and read the
    # undefined name `obj`; recursion now goes through a private helper.
    def _to_numpy(obj ):
        if isinstance(obj , (dict, UserDict) ):
            return {k: _to_numpy(v ) for k, v in obj.items()}
        elif isinstance(obj , (list, tuple) ):
            return np.array(obj )
        elif is_tf_tensor(obj ):
            return obj.numpy()
        elif is_torch_tensor(obj ):
            return obj.detach().cpu().numpy()
        elif is_jax_tensor(obj ):
            return np.asarray(obj )
        else:
            return obj

    return _to_numpy(_lowerCamelCase )
class UpperCAmelCase_ ( a):
    # Ordered-mapping container for model outputs (ModelOutput-style): fields
    # are simultaneously attributes and dict entries, and item mutation APIs
    # are disabled.
    # NOTE(review): throughout this class the obfuscation bound values to the
    # throwaway name `_lowerCAmelCase` while later statements read the
    # intended names (`class_fields`, `first_field`, `v`, ...) and the
    # undefined `__a` — several paths raise NameError at runtime. Left
    # byte-identical; flagged inline.
    def snake_case__ ( self):
        '''Post-init hook: validate the dataclass fields and mirror every
        non-None field value into the underlying ordered mapping.'''
        _lowerCAmelCase : Tuple = fields(self)  # NOTE(review): meant to bind `class_fields`
        # Safety and consistency checks
        if not len(__a):  # NOTE(review): `__a` is undefined here — should test `class_fields`
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")
        _lowerCAmelCase : Dict = getattr(self, class_fields[0].name)  # NOTE(review): meant to bind `first_field`
        _lowerCAmelCase : str = all(getattr(self, field.name) is None for field in class_fields[1:])
        if other_fields_are_none and not is_tensor(__a):
            if isinstance(__a, __a):  # NOTE(review): presumably isinstance(first_field, dict)
                _lowerCAmelCase : Tuple = first_field.items()
                _lowerCAmelCase : Dict = True
            else:
                try:
                    _lowerCAmelCase : Dict = iter(__a)
                    _lowerCAmelCase : Any = True
                except TypeError:
                    _lowerCAmelCase : Any = False
            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(__a):
                    if (
                        not isinstance(__a, (list, tuple))
                        or not len(__a) == 2
                        or not isinstance(element[0], __a)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            _lowerCAmelCase : Any = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value).")
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        _lowerCAmelCase : Any = element[1]
        elif first_field is not None:
            _lowerCAmelCase : Any = first_field
        else:
            # Generic case: mirror each non-None dataclass field into the mapping.
            for field in class_fields:
                _lowerCAmelCase : Dict = getattr(self, field.name)
                if v is not None:  # NOTE(review): `v` was meant to be the getattr result above
                    _lowerCAmelCase : Union[str, Any] = v
    def __delitem__( self, *__a, **__a):
        '''Deleting entries is intentionally forbidden.'''
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")
    def snake_case__ ( self, *__a, **__a):
        '''``setdefault`` is intentionally forbidden.'''
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")
    def snake_case__ ( self, *__a, **__a):
        '''``pop`` is intentionally forbidden.'''
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")
    def snake_case__ ( self, *__a, **__a):
        '''``update`` is intentionally forbidden.'''
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")
    def __getitem__( self, __a):
        '''String keys index the mapping; integer/slice keys index the value tuple.'''
        if isinstance(__a, __a):
            _lowerCAmelCase : Optional[int] = dict(self.items())
            return inner_dict[k]  # NOTE(review): `inner_dict`/`k` undefined — broken local binding above
        else:
            return self.to_tuple()[k]
    def __setattr__( self, __a, __a):
        '''Keep attribute writes and mapping entries in sync.'''
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(__a, __a)
        super().__setattr__(__a, __a)
    def __setitem__( self, __a, __a):
        '''Keep mapping writes and attributes in sync.'''
        super().__setitem__(__a, __a)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(__a, __a)
    def snake_case__ ( self):
        '''Return all stored values as a tuple, in key order.'''
        return tuple(self[k] for k in self.keys())
class UpperCAmelCase_ ( a , a):
    # NOTE(review): the base list names the same alias twice, which Python
    # rejects ("duplicate base class") — upstream this is ``(str, Enum)``.
    # Left untouched pending the alias definitions.
    @classmethod
    def snake_case__ ( cls, __a):
        """Enum ``_missing_`` hook: raise a descriptive error listing valid values.

        Fixed: the message read the undefined name ``value`` and the
        misspelled ``_valueamember_map_`` instead of Enum's
        ``_value2member_map_``.
        """
        raise ValueError(
            f"{__a} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}")
class UpperCAmelCase_ ( a):
    # Padding strategies accepted by tokenizer ``padding=`` arguments.
    # NOTE(review): mechanical renaming gave all three members the same
    # identifier, so only the last assignment ('do_not_pad') survives —
    # presumably these were LONGEST / MAX_LENGTH / DO_NOT_PAD; confirm
    # against the original enum.
    lowerCamelCase__ = 'longest'
    lowerCamelCase__ = 'max_length'
    lowerCamelCase__ = 'do_not_pad'
class UpperCAmelCase_ ( a):
    # Tensor framework identifiers ("pt"/"tf"/"np"/"jax").
    # NOTE(review): all four members share one identifier after the
    # mechanical renaming, so only the last value ('jax') survives —
    # presumably PYTORCH / TENSORFLOW / NUMPY / JAX originally.
    lowerCamelCase__ = 'pt'
    lowerCamelCase__ = 'tf'
    lowerCamelCase__ = 'np'
    lowerCamelCase__ = 'jax'
class UpperCAmelCase_ :
    """Enter a collection of context managers together and unwind them on exit.

    Fixed: ``__init__`` previously bound the managers and the ExitStack to
    throwaway locals (so ``__enter__`` raised AttributeError), ``__enter__``
    passed the undefined name ``__a`` to ``enter_context``, and ``__exit__``
    declared two parameters with the same name (a SyntaxError).
    """
    def __init__( self, __a):
        self.context_managers = __a
        self.stack = ExitStack()
    def __enter__( self):
        # ExitStack guarantees LIFO cleanup of everything entered here.
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)
    def __exit__( self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
def A ( _lowerCamelCase ):
    """Return True when a model class's signature defaults ``return_loss`` to True.

    ``_lowerCamelCase`` is a model class; the framework (tf/pt/flax) is
    inferred from its MRO to pick the right entry point to inspect.

    Fixed: locals were bound to throwaway names while later lines read the
    undefined ``framework``/``model_class``/``signature``.
    """
    framework = infer_framework(_lowerCamelCase )
    if framework == "tf":
        signature = inspect.signature(_lowerCamelCase.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(_lowerCamelCase.forward )  # PyTorch models
    else:
        signature = inspect.signature(_lowerCamelCase.__call__ )  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def A ( _lowerCamelCase ):
    """List the label-argument names in a model class's forward/call signature.

    QuestionAnswering models additionally accept start/end positions.

    Fixed: locals were bound to throwaway names while later lines read the
    undefined ``model_name``/``framework``/``model_class``/``signature``.
    """
    model_name = _lowerCamelCase.__name__
    framework = infer_framework(_lowerCamelCase )
    if framework == "tf":
        signature = inspect.signature(_lowerCamelCase.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(_lowerCamelCase.forward )  # PyTorch models
    else:
        signature = inspect.signature(_lowerCamelCase.__call__ )  # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def A ( d , parent_key = "" , delimiter = "." ):
    """Flatten a nested mapping into a single-level dict, joining keys with
    ``delimiter``.

    Fixed: the original declared three parameters with the same name (a
    SyntaxError), bound the joined key to a throwaway local while yielding
    the undefined name ``key``, and passed the wrong objects to isinstance
    and to the recursive call.
    """
    from collections.abc import MutableMapping  # local import keeps this fix self-contained

    def _flatten_dict(d , parent_key="" , delimiter="." ):
        for k, v in d.items():
            key = str(parent_key ) + delimiter + str(k ) if parent_key else k
            if v and isinstance(v , MutableMapping ):
                # Recurse into non-empty nested mappings.
                yield from _flatten_dict(v , key , delimiter )
            else:
                yield key, v

    return dict(_flatten_dict(d , parent_key , delimiter ) )
@contextmanager
def A ( working_dir , use_temp_dir = False ):
    """Yield a fresh temporary directory when ``use_temp_dir`` is True,
    otherwise yield ``working_dir`` unchanged.

    Fixed: the original declared both parameters with the same name, which
    is a SyntaxError.
    """
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def A ( array , axes=None ):
    """Framework-agnostic transpose for numpy/torch/tf/jax arrays.

    Fixed: the original declared both parameters with the same name (a
    SyntaxError) and the body read the undefined names ``array``/``axes``.
    """
    if is_numpy_array(array ):
        return np.transpose(array , axes=axes )
    elif is_torch_tensor(array ):
        return array.T if axes is None else array.permute(*axes )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.transpose(array , perm=axes )
    elif is_jax_tensor(array ):
        return jnp.transpose(array , axes=axes )
    else:
        raise ValueError(F"Type not supported for transpose: {type(array )}." )
def A ( array , newshape ):
    """Framework-agnostic reshape for numpy/torch/tf/jax arrays.

    Fixed: duplicate parameter names (SyntaxError) and undefined
    ``array`` in the torch branch.
    """
    if is_numpy_array(array ):
        return np.reshape(array , newshape )
    elif is_torch_tensor(array ):
        return array.reshape(*newshape )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.reshape(array , newshape )
    elif is_jax_tensor(array ):
        return jnp.reshape(array , newshape )
    else:
        raise ValueError(F"Type not supported for reshape: {type(array )}." )
def A ( array , axis=None ):
    """Framework-agnostic squeeze for numpy/torch/tf/jax arrays.

    Fixed: duplicate parameter names (SyntaxError) and undefined
    ``array``/``axis`` in the branches.
    """
    if is_numpy_array(array ):
        return np.squeeze(array , axis=axis )
    elif is_torch_tensor(array ):
        return array.squeeze() if axis is None else array.squeeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.squeeze(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.squeeze(array , axis=axis )
    else:
        raise ValueError(F"Type not supported for squeeze: {type(array )}." )
def A ( array , axis ):
    """Framework-agnostic expand_dims for numpy/torch/tf/jax arrays.

    Fixed: duplicate parameter names (SyntaxError) and undefined
    ``array``/``axis`` in the branches.
    """
    if is_numpy_array(array ):
        return np.expand_dims(array , axis )
    elif is_torch_tensor(array ):
        return array.unsqueeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.expand_dims(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.expand_dims(array , axis=axis )
    else:
        raise ValueError(F"Type not supported for expand_dims: {type(array )}." )
def A ( _lowerCamelCase ):
    """Return the total number of elements of a numpy/torch/tf/jax array.

    Fixed: the torch and jax branches read the undefined name ``array``,
    and the error message was copy-pasted from expand_dims.
    """
    if is_numpy_array(_lowerCamelCase ):
        return np.size(_lowerCamelCase )
    elif is_torch_tensor(_lowerCamelCase ):
        return _lowerCamelCase.numel()
    elif is_tf_tensor(_lowerCamelCase ):
        import tensorflow as tf

        return tf.size(_lowerCamelCase )
    elif is_jax_tensor(_lowerCamelCase ):
        return _lowerCamelCase.size
    else:
        raise ValueError(F"Type not supported for tensor_size: {type(_lowerCamelCase )}." )
def A ( auto_map , repo_id ):
    """Prefix every entry of an ``auto_map`` with ``repo_id`` (``repo--value``)
    unless the value is None or already carries a ``--`` prefix.

    Fixed: the original declared both parameters with the same name (a
    SyntaxError) and assigned the rewritten values to throwaway locals, so
    the map was returned unchanged. The map is mutated in place and returned.
    """
    for key, value in auto_map.items():
        if isinstance(value , (tuple, list) ):
            auto_map[key] = [F"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = F"{repo_id}--{value}"
    return auto_map
def A ( _lowerCamelCase ):
    """Infer the framework ("tf"/"pt"/"flax") of a model class from its MRO.

    Fixed: ``module``/``name`` were bound to throwaway locals (NameError),
    and the TypeError must live in the loop's ``else`` clause so that every
    base class is inspected before giving up — the original raised on the
    first base class that did not match.
    """
    for base_class in inspect.getmro(_lowerCamelCase ):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow" ) or module.startswith("keras" ) or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch" ) or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax" ) or module.startswith("jax" ) or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(F"Could not infer framework from class {_lowerCamelCase}." )
| 36
| 0
|
'''simple docstring'''
import torch
def __snake_case ( ):
if torch.cuda.is_available():
lowerCamelCase_ = torch.cuda.device_count()
else:
lowerCamelCase_ = 0
print(F'''Successfully ran on {num_gpus} GPUs''' )
if __name__ == "__main__":
    # Fixed: this module defines ``__snake_case``, not ``main`` — the old
    # call raised NameError when the script was executed.
    __snake_case()
| 55
|
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected" , [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10 )]),
        # Fixed: this row referenced the undefined name `_lowerCamelCase`;
        # each of the 10 jobs gets exactly one shard.
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i , i + 1 ) for i in range(10 )]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1 )]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
    ] , )
def A ( kwargs , expected ):
    """_distribute_shards must split num_shards into at most max_num_jobs contiguous ranges."""
    # Fixed: the parameters were both named `_lowerCamelCase` (SyntaxError);
    # pytest injects them by the names declared in the parametrize id string.
    out = _distribute_shards(**kwargs )
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected" , [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ] , )
def A ( gen_kwargs , max_num_jobs , expected ):
    """_split_gen_kwargs must split sharded kwargs across at most max_num_jobs jobs."""
    # Fixed: the three parameters shared one name (SyntaxError); pytest
    # injects them by the names declared in the parametrize id string.
    out = _split_gen_kwargs(gen_kwargs , max_num_jobs )
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, expected" , [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        # Lists of different lengths must be rejected.
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ] , )
def A ( gen_kwargs , expected ):
    """_number_of_shards_in_gen_kwargs counts shards, raising on inconsistent lists."""
    # Fixed: both parameters shared one name (SyntaxError); pytest injects
    # them by the names declared in the parametrize id string.
    if expected is RuntimeError:
        with pytest.raises(expected ):
            _number_of_shards_in_gen_kwargs(gen_kwargs )
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs )
        assert out == expected
| 36
| 0
|
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a : int = 16
a : Optional[Any] = 32
# The two constants above were mechanically renamed to the same identifier;
# training_function below still reads the original constant name, so restore
# it here (backward compatible: `a` keeps its final value of 32).
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def __magic_name__ ( accelerator, batch_size = 16 ) -> str:
    """Build train/eval DataLoaders for GLUE MRPC tokenized with bert-base-cased.

    Fixed: the original declared both parameters with the same name (a
    SyntaxError) and bound every intermediate to throwaway locals while the
    body read the intended names (tokenizer, datasets, tokenized_datasets...).
    """
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    datasets = load_dataset('''glue''', '''mrpc''' )

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''' )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding='''longest''', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='''pt''', )

    # Instantiate dataloaders. NOTE(review): the eval loader's shuffle flag
    # and batch size were lost in the obfuscation; shuffle=False and the
    # train batch size match the upstream accelerate example — confirm.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''], shuffle=True, collate_fn=collate_fn, batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''], shuffle=False, collate_fn=collate_fn, batch_size=batch_size )
    return train_dataloader, eval_dataloader
# For testing only: swap in lightweight mocked dataloaders when the CI
# environment requests it.
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders
    # NOTE(review): this rebinds the module-level name `a` (which above held
    # a batch-size constant) — looks like an artifact of mechanical renaming;
    # the intended target was presumably `get_dataloaders`. Verify.
    a : Dict = mocked_dataloaders # noqa: F811
def __magic_name__ ( config, args ) -> str:
    """Train and evaluate bert-base-cased on GLUE/MRPC with Accelerate.

    config: dict with 'lr', 'num_epochs', 'seed', 'batch_size'.
    args:   argparse namespace with .cpu and .mixed_precision.

    Fixed: the original declared both parameters with the same name (a
    SyntaxError) and bound every intermediate to throwaway locals while the
    body read the intended names; ``samples_seen`` was never initialized.
    """
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None ) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )
    metric = evaluate.load('''glue''', '''mrpc''' )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed )
    # NOTE(review): the dataloader factory in this module was renamed to
    # ``__magic_name__`` by the obfuscation — this call keeps the intended name.
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps, )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions, references = accelerator.gather((predictions, batch['''labels''']) )
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader ) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    references = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references, )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F"epoch {epoch}:", eval_metric )
def __magic_name__ ( ) -> Tuple:
    """Parse CLI flags and launch training.

    Fixed: the parser and parsed args were bound to throwaway locals while
    the body read the undefined names ``parser``/``args``.
    """
    parser = argparse.ArgumentParser(description='''Simple example of training script.''' )
    parser.add_argument(
        '''--mixed_precision''', type=str, default=None, choices=['''no''', '''fp16''', '''bf16''', '''fp8'''], help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''', )
    parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''' )
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    # NOTE(review): mechanical renaming left the training loop above named
    # ``__magic_name__``; this call targets it under its intended name.
    training_function(config, args )
if __name__ == "__main__":
    # Fixed: ``main`` does not exist in this module — the entry point was
    # renamed to ``__magic_name__`` (the last definition above).
    __magic_name__()
| 56
|
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class UpperCAmelCase_ :
    """Wrap a CLIP tokenizer plus a differentiable image pipeline so that
    gradients can flow through the pixel preprocessing.

    Fixed: ``__init__``/``__call__`` declared duplicate parameter names
    (SyntaxErrors) and every attribute was bound to a throwaway local, so
    the methods below crashed with AttributeError.
    """
    def __init__( self, device = "cpu", clip_model = "openai/clip-vit-large-patch14"):
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        # CLIP's published normalization statistics.
        self.image_mean = [0.48_145_466, 0.4_578_275, 0.40_821_073]
        self.image_std = [0.26_862_954, 0.26_130_258, 0.27_577_711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)
    def snake_case__ ( self, images):
        """Resize, center-crop (224x224) and normalize a batch of images."""
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images
    def __call__( self, text=None, images=None, **kwargs):
        """Tokenize ``text``, preprocess ``images`` and move everything to self.device."""
        encoding = self.tokenizer(text=text, **kwargs)
        # NOTE(review): the obfuscated original dropped this assignment; the
        # upstream script stores the processed pixels under "pixel_values" —
        # confirm the key against the original VQGAN-CLIP code.
        encoding["pixel_values"] = self.snake_case__(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class UpperCAmelCase_ ( nn.Module):
    # VQGAN+CLIP image editor: optimizes a latent offset so the decoded image
    # matches positive text prompts and avoids negative ones.
    # NOTE(review): throughout this class the obfuscation (a) declared every
    # multi-parameter signature with duplicate `__a` names, which is a
    # SyntaxError, and (b) bound values to the throwaway name `_lowerCAmelCase`
    # while later code reads the intended attributes/locals (self.latent,
    # self.vqgan, z, ...). Left byte-identical; the worst spots are flagged.
    def __init__( self, __a=10, __a=0.01, __a=None, __a=None, __a=None, __a=None, __a=None, __a=None, __a=False, __a=True, __a="image", __a=True, __a=False, __a=False, __a=False, ):
        '''Load (or accept) a VQGAN and a CLIP model and store run options
        (iterations, lr, logging, quantization, output mode).'''
        super().__init__()
        _lowerCAmelCase : List[str] = None
        _lowerCAmelCase : List[str] = device if device else get_device()
        if vqgan:
            _lowerCAmelCase : Union[str, Any] = vqgan
        else:
            _lowerCAmelCase : Optional[Any] = load_vqgan(self.device, conf_path=__a, ckpt_path=__a)
        self.vqgan.eval()
        if clip:
            _lowerCAmelCase : str = clip
        else:
            _lowerCAmelCase : int = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        _lowerCAmelCase : Optional[int] = ProcessorGradientFlow(device=self.device)
        _lowerCAmelCase : Any = iterations
        _lowerCAmelCase : List[Any] = lr
        _lowerCAmelCase : Tuple = log
        _lowerCAmelCase : List[str] = make_grid
        _lowerCAmelCase : int = return_val
        _lowerCAmelCase : Dict = quantize
        _lowerCAmelCase : Any = self.vqgan.decoder.z_shape
    def snake_case__ ( self, __a=None, __a=None, __a=5, __a=True):
        '''Assemble the intermediate PNGs saved during generate() into an
        animated GIF (default ./animation.gif).'''
        _lowerCAmelCase : Union[str, Any] = []
        if output_path is None:
            _lowerCAmelCase : List[Any] = "./animation.gif"
        if input_path is None:
            _lowerCAmelCase : str = self.save_path
        _lowerCAmelCase : str = sorted(glob(input_path + "/*"))
        if not len(__a):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)")
        if len(__a) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        _lowerCAmelCase : Optional[int] = total_duration / len(__a)
        _lowerCAmelCase : Union[str, Any] = [frame_duration] * len(__a)
        if extend_frames:
            # Hold the first frame longer and the last frame longest.
            _lowerCAmelCase : Any = 1.5
            _lowerCAmelCase : List[str] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(__a))
        imageio.mimsave(__a, __a, duration=__a)
        print(f"gif saved to {output_path}")
    def snake_case__ ( self, __a=None, __a=None):
        '''Encode an image (from disk) into the VQGAN latent space.'''
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        _lowerCAmelCase : Dict = preprocess(Image.open(__a), target_image_size=256).to(self.device)
        _lowerCAmelCase : Dict = preprocess_vqgan(__a)
        _lowerCAmelCase , *_lowerCAmelCase : str = self.vqgan.encode(__a)
        return z  # NOTE(review): `z` is undefined — the encode result above was bound to a throwaway name
    def snake_case__ ( self, __a):
        '''Add a transform vector to the current latent (optionally re-quantized)
        and decode it back to an image.'''
        _lowerCAmelCase : Optional[Any] = self.latent.detach().requires_grad_()
        _lowerCAmelCase : Dict = base_latent + transform_vector
        if self.quantize:
            _lowerCAmelCase , *_lowerCAmelCase : List[Any] = self.vqgan.quantize(__a)
        else:
            _lowerCAmelCase : Any = trans_latent
        return self.vqgan.decode(__a)
    def snake_case__ ( self, __a, __a, __a=None):
        '''CLIP similarity between prompts and an image, optionally weighted.'''
        _lowerCAmelCase : int = self.clip_preprocessor(text=__a, images=__a, return_tensors="pt", padding=__a)
        _lowerCAmelCase : Optional[int] = self.clip(**__a)
        _lowerCAmelCase : Any = clip_outputs.logits_per_image
        if weights is not None:
            _lowerCAmelCase : Tuple = similarity_logits * weights
        return similarity_logits.sum()
    def snake_case__ ( self, __a, __a, __a):
        '''Contrastive CLIP loss: -log(sim to positive prompts) + log(sim to
        negative prompts); negatives default to a constant when absent.'''
        _lowerCAmelCase : List[Any] = self._get_clip_similarity(pos_prompts["prompts"], __a, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            _lowerCAmelCase : List[Any] = self._get_clip_similarity(neg_prompts["prompts"], __a, weights=neg_prompts["weights"])
        else:
            _lowerCAmelCase : Union[str, Any] = torch.tensor([1], device=self.device)
        _lowerCAmelCase : List[str] = -torch.log(__a) + torch.log(__a)
        return loss
    def snake_case__ ( self, __a, __a, __a):
        '''Gradient-descend a latent offset vector for self.iterations steps,
        yielding the decoded image (or the raw vector) after each step.'''
        _lowerCAmelCase : Optional[Any] = torch.randn_like(self.latent, requires_grad=__a, device=self.device)
        _lowerCAmelCase : Optional[int] = torch.optim.Adam([vector], lr=self.lr)
        for i in range(self.iterations):
            optim.zero_grad()
            _lowerCAmelCase : Any = self._add_vector(__a)
            _lowerCAmelCase : Optional[Any] = loop_post_process(__a)
            _lowerCAmelCase : Optional[Any] = self._get_CLIP_loss(__a, __a, __a)
            print("CLIP loss", __a)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=__a)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector
    def snake_case__ ( self, __a, __a, __a):
        '''Initialize a wandb run and record prompts, hyper-parameters and the
        original image (when editing an existing image).'''
        wandb.init(reinit=__a, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            _lowerCAmelCase : str = Image.open(__a)
            _lowerCAmelCase : int = image.resize((256, 256))
            wandb.log("Original Image", wandb.Image(__a))
    def snake_case__ ( self, __a):
        '''Normalize prompts ("a|b", [(text, weight)], "text:weight", ...) into
        {"prompts": [...], "weights": tensor([...])}.'''
        if not prompts:
            return []
        _lowerCAmelCase : int = []
        _lowerCAmelCase : List[str] = []
        if isinstance(__a, __a):
            _lowerCAmelCase : Union[str, Any] = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(__a, (tuple, list)):
                _lowerCAmelCase : Optional[Any] = prompt[0]
                _lowerCAmelCase : Union[str, Any] = float(prompt[1])
            elif ":" in prompt:
                _lowerCAmelCase , _lowerCAmelCase : int = prompt.split(":")
                _lowerCAmelCase : Optional[Any] = float(__a)
            else:
                _lowerCAmelCase : Optional[int] = prompt
                _lowerCAmelCase : List[Any] = 1.0
            processed_prompts.append(__a)
            weights.append(__a)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(__a, device=self.device),
        }
    def snake_case__ ( self, __a, __a=None, __a=None, __a=True, __a=False, __a=True, __a=True, __a=None, ):
        '''Main entry point: start from an image (or random latent), optimize
        toward the positive prompts, and show/save intermediate and final
        images as requested.'''
        if image_path:
            _lowerCAmelCase : List[Any] = self._get_latent(__a)
        else:
            _lowerCAmelCase : Any = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(__a, __a, __a)
        assert pos_prompts, "You must provide at least one positive prompt."
        _lowerCAmelCase : int = self.process_prompts(__a)
        _lowerCAmelCase : List[str] = self.process_prompts(__a)
        if save_final and save_path is None:
            _lowerCAmelCase : int = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(__a):
                os.makedirs(__a)
            else:
                # Avoid clobbering a previous run with the same prompts.
                _lowerCAmelCase : Tuple = save_path + "_" + get_timestamp()
                os.makedirs(__a)
            _lowerCAmelCase : Tuple = save_path
        _lowerCAmelCase : List[Any] = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(__a))
        _lowerCAmelCase : int = loop_post_process(__a)
        for iter, transformed_img in enumerate(self._optimize_CLIP(__a, __a, __a)):
            if show_intermediate:
                show_pil(__a)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(__a)})
        if show_final:
            show_pil(__a)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
| 36
| 0
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=lowerCAmelCase__ )
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Task template for audio classification: maps dataset columns onto the
    audio/labels schema expected by audio-classification models.

    Fixed: the mangled copy gave all five class attributes one shared name
    (collapsing the dataclass to a single field) and the align method bound
    its results to throwaway locals while returning the undefined name
    ``task_template``. The attribute names below are the ones the methods
    actually read.
    """
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema: ClassVar[Features] = Features({"audio": Audio()} )
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel} )
    audio_column: str = "audio"
    label_column: str = "labels"

    def snake_case ( self , features ):
        """Return a deep copy of this template whose label schema uses the
        ClassLabel feature found in ``features``."""
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features." )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel." )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # Write through __dict__ to bypass the frozen-dataclass setattr guard.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self ):
        """Map the configured dataset columns to the schema names.

        NOTE(review): renamed from a second ``snake_case`` definition that
        clobbered the method above — confirm the property name expected by
        the TaskTemplate base class.
        """
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
| 57
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
_snake_case = get_tests_dir("fixtures")
class UpperCAmelCase_ ( unittest.TestCase):
    # Offline/edge-case tests for image-processor loading.
    # NOTE(review): all three test methods share the name ``snake_case__``,
    # so only the last definition is ever bound or run — upstream these were
    # distinct ``test_*`` methods.
    def snake_case__ ( self):
        '''Cached files should be served when the Hub responds with HTTP 500.'''
        # NOTE(review): the mock's status/headers/json were meant to be set
        # as attributes of the Mock object (response_mock.status_code = 500,
        # ...), but are bound to throwaway locals here, and ``__a`` below is
        # undefined in this scope. Left byte-identical.
        _lowerCAmelCase : Optional[Any] = mock.Mock()
        _lowerCAmelCase : int = 500
        _lowerCAmelCase : Tuple = {}
        _lowerCAmelCase : str = HTTPError
        _lowerCAmelCase : Union[str, Any] = {}
        # Download this model to make sure it's in the cache.
        _lowerCAmelCase : Tuple = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=__a) as mock_head:
            _lowerCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()
    def snake_case__ ( self):
        '''An image processor can be loaded straight from a config-file URL.'''
        _lowerCAmelCase : Tuple = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json")
    def snake_case__ ( self):
        '''Loading a config stored in a repo subfolder requires ``subfolder=``.'''
        # NOTE(review): ``__a`` is undefined here — upstream this asserted a
        # specific exception type (e.g. OSError). Left byte-identical.
        with self.assertRaises(__a):
            # config is in subfolder, the following should not work without specifying the subfolder
            _lowerCAmelCase : int = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")
        _lowerCAmelCase : Optional[Any] = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor")
        self.assertIsNotNone(__a)
@is_staging_test
class UpperCAmelCase_ ( unittest.TestCase):
    """Staging-Hub round-trip tests: push image processors and reload them.

    NOTE(review): the obfuscation collapsed distinct locals into
    ``_lowerCAmelCase`` and references the undefined ``__a`` throughout;
    the notes below mark what each spot presumably meant — confirm.
    """

    @classmethod
    def snake_case__ ( cls):
        """Authenticate against the staging endpoint (presumably setUpClass)."""
        _lowerCAmelCase : Union[str, Any] = TOKEN
        # NOTE(review): ``__a`` undefined — presumably the token just assigned above.
        HfFolder.save_token(__a)

    @classmethod
    def snake_case__ ( cls):
        """Best-effort cleanup of repos the tests create (presumably tearDownClass).

        NOTE(review): shadows the previous ``snake_case__`` classmethod.
        """
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def snake_case__ ( self):
        """Push a processor to a user repo (API + ``save_pretrained``) and reload it."""
        # NOTE(review): ``__a`` undefined — presumably the fixtures dir constant.
        _lowerCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained(__a)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)
        _lowerCAmelCase : str = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(__a, getattr(__a, __a))
        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                __a, repo_id="test-image-processor", push_to_hub=__a, use_auth_token=self._token)
        _lowerCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(__a, getattr(__a, __a))

    def snake_case__ ( self):
        """Same round-trip as above, against an organization repo."""
        _lowerCAmelCase : Any = ViTImageProcessor.from_pretrained(__a)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)
        _lowerCAmelCase : Tuple = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(__a, getattr(__a, __a))
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                __a, repo_id="valid_org/test-image-processor-org", push_to_hub=__a, use_auth_token=self._token)
        _lowerCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(__a, getattr(__a, __a))

    def snake_case__ ( self):
        """Push a custom (dynamic) processor and reload it with ``trust_remote_code``."""
        CustomImageProcessor.register_for_auto_class()
        _lowerCAmelCase : List[str] = CustomImageProcessor.from_pretrained(__a)
        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map, {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"}, )
        _lowerCAmelCase : Tuple = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=__a)
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
| 36
| 0
|
'''simple docstring'''
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
# NOTE(review): the obfuscated original bound both NewTypes to the same name,
# losing the first alias that later annotations (e.g. ``Tuple[DataClass, ...]``)
# still reference. Restore both distinct names.
DataClass = NewType("""DataClass""", Any)
DataClassType = NewType("""DataClassType""", Any)

# Preserve the obfuscated module-level name; in the original it ended up bound
# to the second NewType because the second assignment shadowed the first.
lowercase_ = DataClassType
def string_to_bool(v: Union[str, bool]) -> bool:
    """Interpret *v* as a boolean.

    Booleans are returned unchanged; strings are matched case-insensitively
    against common yes/no spellings.

    Raises:
        ArgumentTypeError: if *v* is a string matching neither set.

    NOTE(review): the obfuscated original tested ``isinstance(v, v)``, which
    raises TypeError for every input; fixed to test against ``bool``.
    """
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            F'Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).' )


# Backward-compatible binding of the obfuscated public name (also resolves the
# ``string_to_bool`` reference used by the parser class below).
lowerCamelCase = string_to_bool
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Return an argparse ``type=`` converter mapping a CLI string back to its choice.

    Unknown strings are passed through unchanged so argparse can report them.

    NOTE(review): the obfuscated original keyed the mapping on the whole list
    and read the undefined ``str_to_choice``/``choices`` names; restored from
    the body's own references.
    """
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


# Backward-compatible binding of the obfuscated public name (also resolves the
# ``make_choice_type_function`` reference used by the parser class below).
lowerCamelCase = make_choice_type_function
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    """Keyword-only convenience wrapper around :func:`dataclasses.field`.

    ``aliases`` and ``help`` are folded into the field's ``metadata`` dict so
    the argument parser can pick them up.

    NOTE(review): the obfuscated original gave every keyword-only parameter the
    same name — a SyntaxError; parameter names restored from the body's own
    references (``aliases``/``help``/``metadata``).
    """
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)


# Backward-compatible binding of the obfuscated public name.
lowerCamelCase = HfArg
class a_ ( snake_case_ ):
    """Command-line parser that derives its arguments from dataclass fields.

    NOTE(review): the base name ``snake_case_`` is never defined in this module
    (it reads like an obfuscated ``argparse.ArgumentParser``). The obfuscation
    also collapsed distinct parameters/locals into ``A``/``_SCREAMING_SNAKE_CASE``
    — several defs below have duplicate argument names (a SyntaxError) and read
    names that are never bound — and gave every method the same name
    ``snake_case_`` so only the last one survives. Flagged inline; code left
    byte-identical.
    """

    # NOTE(review): remnant of a class-level annotation (likely
    # ``dataclass_types: Iterable[DataClassType]``); the literal 42 carries no meaning.
    UpperCamelCase = 42

    def __init__( self , A , **A ) -> int:
        """Register every init field of the given dataclass type(s) as CLI arguments.

        NOTE(review): ``A`` is used for both the positional and the ``**``
        parameter (SyntaxError); the body also reads the unbound names
        ``kwargs`` and ``dataclass_types``.
        """
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            _SCREAMING_SNAKE_CASE = ArgumentDefaultsHelpFormatter
        super().__init__(**A )
        if dataclasses.is_dataclass(A ):
            _SCREAMING_SNAKE_CASE = [dataclass_types]
        _SCREAMING_SNAKE_CASE = list(A )
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(A )

    @staticmethod
    def snake_case_( A , A ) -> int:
        """Translate a single dataclass field into a ``parser.add_argument`` call.

        NOTE(review): duplicate parameter name ``A`` (SyntaxError); the body's
        intended locals (``field``, ``kwargs``, ``aliases``, ``origin_type``,
        ``parser``, ...) are never bound as written.
        """
        _SCREAMING_SNAKE_CASE = f'--{field.name}'
        _SCREAMING_SNAKE_CASE = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type , A ):
            raise RuntimeError(
                """Unresolved type detected, which should have been done with the help of """
                """`typing.get_type_hints` method by default""" )
        _SCREAMING_SNAKE_CASE = kwargs.pop("""aliases""" , [] )
        if isinstance(A , A ):
            _SCREAMING_SNAKE_CASE = [aliases]
        _SCREAMING_SNAKE_CASE = getattr(field.type , """__origin__""" , field.type )
        if origin_type is Union or (hasattr(A , """UnionType""" ) and isinstance(A , types.UnionType )):
            if str not in field.type.__args__ and (
                len(field.type.__args__ ) != 2 or type(A ) not in field.type.__args__
            ):
                raise ValueError(
                    """Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"""
                    """ the argument parser only supports one type per argument."""
                    f' Problem encountered in field \'{field.name}\'.' )
            if type(A ) not in field.type.__args__:
                # filter `str` in Union
                _SCREAMING_SNAKE_CASE = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                _SCREAMING_SNAKE_CASE = getattr(field.type , """__origin__""" , field.type )
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                _SCREAMING_SNAKE_CASE = (
                    field.type.__args__[0] if isinstance(A , field.type.__args__[1] ) else field.type.__args__[1]
                )
                _SCREAMING_SNAKE_CASE = getattr(field.type , """__origin__""" , field.type )
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        _SCREAMING_SNAKE_CASE = {}
        if origin_type is Literal or (isinstance(field.type , A ) and issubclass(field.type , A )):
            if origin_type is Literal:
                _SCREAMING_SNAKE_CASE = field.type.__args__
            else:
                _SCREAMING_SNAKE_CASE = [x.value for x in field.type]
            _SCREAMING_SNAKE_CASE = make_choice_type_function(kwargs["""choices"""] )
            if field.default is not dataclasses.MISSING:
                _SCREAMING_SNAKE_CASE = field.default
            else:
                _SCREAMING_SNAKE_CASE = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            _SCREAMING_SNAKE_CASE = copy(A )
            # Hack because type=bool in argparse does not behave as we want.
            _SCREAMING_SNAKE_CASE = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                _SCREAMING_SNAKE_CASE = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                _SCREAMING_SNAKE_CASE = default
                # This tells argparse we accept 0 or 1 value after --field_name
                _SCREAMING_SNAKE_CASE = """?"""
                # This is the value that will get picked if we do --field_name (without value)
                _SCREAMING_SNAKE_CASE = True
        elif isclass(A ) and issubclass(A , A ):
            _SCREAMING_SNAKE_CASE = field.type.__args__[0]
            _SCREAMING_SNAKE_CASE = """+"""
            if field.default_factory is not dataclasses.MISSING:
                _SCREAMING_SNAKE_CASE = field.default_factory()
            elif field.default is dataclasses.MISSING:
                _SCREAMING_SNAKE_CASE = True
        else:
            _SCREAMING_SNAKE_CASE = field.type
            if field.default is not dataclasses.MISSING:
                _SCREAMING_SNAKE_CASE = field.default
            elif field.default_factory is not dataclasses.MISSING:
                _SCREAMING_SNAKE_CASE = field.default_factory()
            else:
                _SCREAMING_SNAKE_CASE = True
        parser.add_argument(A , *A , **A )
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            _SCREAMING_SNAKE_CASE = False
            parser.add_argument(f'--no_{field.name}' , action="""store_false""" , dest=field.name , **A )

    def snake_case_( self , A ) -> Dict:
        """Resolve a dataclass's type hints and add one argument per init field.

        Raises a descriptive RuntimeError when PEP 563 postponed annotations
        prevent :func:`typing.get_type_hints` from resolving the hints.
        """
        if hasattr(A , """_argument_group_name""" ):
            _SCREAMING_SNAKE_CASE = self.add_argument_group(dtype._argument_group_name )
        else:
            _SCREAMING_SNAKE_CASE = self
        try:
            _SCREAMING_SNAKE_CASE = get_type_hints(A )
        except NameError:
            raise RuntimeError(
                f'Type resolution failed for {dtype}. Try declaring the class in global scope or '
                """removing line of `from __future__ import annotations` which opts in Postponed """
                """Evaluation of Annotations (PEP 563)""" )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(A ):
                _SCREAMING_SNAKE_CASE = """.""".join(map(A , sys.version_info[:3] ) )
                raise RuntimeError(
                    f'Type resolution failed for {dtype} on Python {python_version}. Try removing '
                    """line of `from __future__ import annotations` which opts in union types as """
                    """`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To """
                    """support Python versions that lower than 3.10, you need to use """
                    """`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of """
                    """`X | None`.""" ) from ex
            raise
        for field in dataclasses.fields(A ):
            if not field.init:
                continue
            _SCREAMING_SNAKE_CASE = type_hints[field.name]
            self._parse_dataclass_field(A , A )

    def snake_case_( self , A=None , A=False , A=True , A=None , A=None , ) -> Tuple[DataClass, ...]:
        """Parse CLI args (plus optional ``.args`` files) into dataclass instances.

        NOTE(review): duplicate ``A`` parameters (SyntaxError); the intended
        signature carries ``args`` / ``return_remaining_strings`` /
        ``look_for_args_file`` / ``args_filename`` / ``args_file_flag``.
        """
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
            _SCREAMING_SNAKE_CASE = []
            if args_filename:
                args_files.append(Path(A ) )
            elif look_for_args_file and len(sys.argv ):
                args_files.append(Path(sys.argv[0] ).with_suffix(""".args""" ) )
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                _SCREAMING_SNAKE_CASE = ArgumentParser()
                args_file_parser.add_argument(A , type=A , action="""append""" )
                # Use only remaining args for further parsing (remove the args_file_flag)
                _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = args_file_parser.parse_known_args(args=A )
                _SCREAMING_SNAKE_CASE = vars(A ).get(args_file_flag.lstrip("""-""" ) , A )
                if cmd_args_file_paths:
                    args_files.extend([Path(A ) for p in cmd_args_file_paths] )
            _SCREAMING_SNAKE_CASE = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            _SCREAMING_SNAKE_CASE = file_args + args if args is not None else file_args + sys.argv[1:]
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.parse_known_args(args=A )
        _SCREAMING_SNAKE_CASE = []
        for dtype in self.dataclass_types:
            _SCREAMING_SNAKE_CASE = {f.name for f in dataclasses.fields(A ) if f.init}
            _SCREAMING_SNAKE_CASE = {k: v for k, v in vars(A ).items() if k in keys}
            for k in keys:
                delattr(A , A )
            _SCREAMING_SNAKE_CASE = dtype(**A )
            outputs.append(A )
        if len(namespace.__dict__ ) > 0:
            # additional namespace.
            outputs.append(A )
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f'Some specified arguments are not used by the HfArgumentParser: {remaining_args}' )
            return (*outputs,)

    def snake_case_( self , A , A = False ) -> Tuple[DataClass, ...]:
        """Instantiate the dataclass types from a plain dict of keyword arguments.

        NOTE(review): duplicate ``A`` parameters (SyntaxError) — intended
        signature is ``(args, allow_extra_keys=False)``.
        """
        _SCREAMING_SNAKE_CASE = set(args.keys() )
        _SCREAMING_SNAKE_CASE = []
        for dtype in self.dataclass_types:
            _SCREAMING_SNAKE_CASE = {f.name for f in dataclasses.fields(A ) if f.init}
            _SCREAMING_SNAKE_CASE = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys() )
            _SCREAMING_SNAKE_CASE = dtype(**A )
            outputs.append(A )
        if not allow_extra_keys and unused_keys:
            raise ValueError(f'Some keys are not used by the HfArgumentParser: {sorted(A )}' )
        return tuple(A )

    def snake_case_( self , A , A = False ) -> Tuple[DataClass, ...]:
        """Load a JSON file and feed its contents through ``parse_dict``."""
        with open(Path(A ) , encoding="""utf-8""" ) as open_json_file:
            _SCREAMING_SNAKE_CASE = json.loads(open_json_file.read() )
        _SCREAMING_SNAKE_CASE = self.parse_dict(A , allow_extra_keys=A )
        return tuple(A )

    def snake_case_( self , A , A = False ) -> Tuple[DataClass, ...]:
        """Load a YAML file and feed its contents through ``parse_dict``."""
        _SCREAMING_SNAKE_CASE = self.parse_dict(yaml.safe_load(Path(A ).read_text() ) , allow_extra_keys=A )
        return tuple(A )
| 58
|
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase_ :
    """Helper that builds tiny LiLT configs and dummy inputs for the tests below.

    NOTE(review): the obfuscation collapsed every parameter into ``__a`` and
    every local into ``_lowerCAmelCase``; several defs therefore have duplicate
    argument names (a SyntaxError) and read names that are never bound, and all
    methods share the name ``snake_case__`` so only the last one survives.
    Flagged inline; code left byte-identical.
    """

    def __init__( self, __a, __a=13, __a=7, __a=True, __a=True, __a=True, __a=True, __a=99, __a=24, __a=2, __a=6, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=16, __a=2, __a=0.02, __a=3, __a=None, __a=1000, ):
        """Store the hyper-parameters used to build configs and dummy inputs.

        NOTE(review): duplicate ``__a`` parameters — the RHS names below
        (``parent``, ``batch_size``, ...) are what they should be called.
        """
        _lowerCAmelCase : Tuple = parent
        _lowerCAmelCase : List[str] = batch_size
        _lowerCAmelCase : int = seq_length
        _lowerCAmelCase : Optional[int] = is_training
        _lowerCAmelCase : Dict = use_input_mask
        _lowerCAmelCase : List[str] = use_token_type_ids
        _lowerCAmelCase : str = use_labels
        _lowerCAmelCase : Optional[Any] = vocab_size
        _lowerCAmelCase : Tuple = hidden_size
        _lowerCAmelCase : List[Any] = num_hidden_layers
        _lowerCAmelCase : Optional[Any] = num_attention_heads
        _lowerCAmelCase : Any = intermediate_size
        _lowerCAmelCase : List[str] = hidden_act
        _lowerCAmelCase : Union[str, Any] = hidden_dropout_prob
        _lowerCAmelCase : Any = attention_probs_dropout_prob
        _lowerCAmelCase : int = max_position_embeddings
        _lowerCAmelCase : Optional[int] = type_vocab_size
        _lowerCAmelCase : Optional[Any] = type_sequence_label_size
        _lowerCAmelCase : List[str] = initializer_range
        _lowerCAmelCase : List[Any] = num_labels
        _lowerCAmelCase : Tuple = scope
        _lowerCAmelCase : str = range_bbox

    def snake_case__ ( self):
        """Build dummy ids/bbox/mask/label tensors plus a config.

        NOTE(review): ``bbox`` and the names in the return are never bound —
        the obfuscation collapsed distinct locals into ``_lowerCAmelCase``.
        """
        _lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        _lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    _lowerCAmelCase : Dict = bbox[i, j, 3]
                    _lowerCAmelCase : int = bbox[i, j, 1]
                    _lowerCAmelCase : Tuple = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    _lowerCAmelCase : str = bbox[i, j, 2]
                    _lowerCAmelCase : List[Any] = bbox[i, j, 0]
                    _lowerCAmelCase : str = t
        _lowerCAmelCase : Optional[Any] = None
        if self.use_input_mask:
            _lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        _lowerCAmelCase : Dict = None
        if self.use_token_type_ids:
            _lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        _lowerCAmelCase : Optional[int] = None
        _lowerCAmelCase : Optional[Any] = None
        if self.use_labels:
            _lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size)
            _lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        _lowerCAmelCase : Optional[int] = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def snake_case__ ( self):
        """Return a tiny ``LiltConfig`` assembled from the stored hyper-parameters."""
        return LiltConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )

    def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a, ):
        """Forward-pass shape checks for the base ``LiltModel``.

        NOTE(review): duplicate ``__a`` parameters (SyntaxError as written).
        """
        _lowerCAmelCase : Union[str, Any] = LiltModel(config=__a)
        model.to(__a)
        model.eval()
        _lowerCAmelCase : Dict = model(__a, bbox=__a, attention_mask=__a, token_type_ids=__a)
        _lowerCAmelCase : str = model(__a, bbox=__a, token_type_ids=__a)
        _lowerCAmelCase : List[Any] = model(__a, bbox=__a)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a, ):
        """Shape checks for ``LiltForTokenClassification``."""
        _lowerCAmelCase : List[Any] = self.num_labels
        _lowerCAmelCase : Optional[Any] = LiltForTokenClassification(config=__a)
        model.to(__a)
        model.eval()
        _lowerCAmelCase : Dict = model(
            __a, bbox=__a, attention_mask=__a, token_type_ids=__a, labels=__a)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a, ):
        """Shape checks for ``LiltForQuestionAnswering``."""
        _lowerCAmelCase : Optional[int] = LiltForQuestionAnswering(config=__a)
        model.to(__a)
        model.eval()
        _lowerCAmelCase : Tuple = model(
            __a, bbox=__a, attention_mask=__a, token_type_ids=__a, start_positions=__a, end_positions=__a, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def snake_case__ ( self):
        """Package config and inputs as the ``(config, inputs_dict)`` pair the mixins expect.

        NOTE(review): the parenthesized tuple target with a ``: Dict`` annotation
        is not valid Python, and the unpacked names below are never bound.
        """
        _lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
        (
            (
                _lowerCAmelCase
            ) , (
                _lowerCAmelCase
            ) , (
                _lowerCAmelCase
            ) , (
                _lowerCAmelCase
            ) , (
                _lowerCAmelCase
            ) , (
                _lowerCAmelCase
            ) , (
                _lowerCAmelCase
            ) ,
        ) : Dict = config_and_inputs
        _lowerCAmelCase : List[Any] = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( a , a , a , unittest.TestCase):
    """Runs the shared model/pipeline test suites against the LiLT models.

    NOTE(review): the base list repeats the (undefined) name ``a`` three times —
    duplicate bases raise at class creation; the class attributes also all share
    the name ``lowerCamelCase__`` so only the last assignment survives, and the
    methods share ``snake_case__``. Flagged inline; code left byte-identical.
    """

    # Model classes exercised by the common suite (only when torch is available).
    lowerCamelCase__ = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    # Mapping from pipeline task name to the model class used by the pipeline suite.
    lowerCamelCase__ = (
        {
            'feature-extraction': LiltModel,
            'question-answering': LiltForQuestionAnswering,
            'text-classification': LiltForSequenceClassification,
            'token-classification': LiltForTokenClassification,
            'zero-shot': LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    lowerCamelCase__ = False
    lowerCamelCase__ = False

    def snake_case__ ( self, __a, __a, __a, __a, __a):
        """Never skip any pipeline test for LiLT.

        NOTE(review): duplicate ``__a`` parameters (SyntaxError as written).
        """
        return True

    def snake_case__ ( self):
        """Set up the model tester and config tester.

        NOTE(review): ``LiltModelTester`` and ``__a`` are not defined in this
        module as written — confirm against the upstream test file.
        """
        _lowerCAmelCase : Optional[Any] = LiltModelTester(self)
        _lowerCAmelCase : Union[str, Any] = ConfigTester(self, config_class=__a, hidden_size=37)

    def snake_case__ ( self):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def snake_case__ ( self):
        """Forward-pass shape checks for the base model."""
        _lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__a)

    def snake_case__ ( self):
        """Repeat the base-model check for each position-embedding type."""
        _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            _lowerCAmelCase : Any = type
            self.model_tester.create_and_check_model(*__a)

    def snake_case__ ( self):
        """Shape checks for the token-classification head."""
        _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*__a)

    def snake_case__ ( self):
        """Shape checks for the question-answering head."""
        _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*__a)

    @slow
    def snake_case__ ( self):
        """Smoke-test loading a pretrained checkpoint from the Hub."""
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCAmelCase : str = LiltModel.from_pretrained(__a)
            self.assertIsNotNone(__a)
@require_torch
@slow
class UpperCAmelCase_ ( unittest.TestCase):
    """Integration test: pretrained LiLT forward pass matches recorded values."""

    def snake_case__ ( self):
        """Run a tiny input through the pretrained checkpoint and compare outputs.

        Fixes vs. the obfuscated original: the undefined ``__a`` is replaced by
        ``torch_device`` (the only device name this file imports); the shadowed
        expected-shape/slice locals get distinct names; and the two-argument
        ``assertTrue(shape, expected)`` — which always passes because a non-empty
        ``torch.Size`` is truthy — becomes a real ``assertEqual``.
        """
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device, )

        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1E-3))
| 36
| 0
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase :
def __init__(self : Optional[Any] , snake_case__ : str , snake_case__ : Optional[Any]=13 , snake_case__ : List[str]=7 , snake_case__ : Dict=True , snake_case__ : str=True , snake_case__ : Optional[Any]=True , snake_case__ : Tuple=True , snake_case__ : Union[str, Any]=True , snake_case__ : Optional[int]=False , snake_case__ : Tuple=False , snake_case__ : Tuple=False , snake_case__ : List[str]=2 , snake_case__ : List[Any]=99 , snake_case__ : List[Any]=0 , snake_case__ : List[Any]=32 , snake_case__ : Tuple=5 , snake_case__ : int=4 , snake_case__ : str=0.1 , snake_case__ : Dict=0.1 , snake_case__ : Optional[Any]=5_12 , snake_case__ : Optional[Any]=2 , snake_case__ : Tuple=0.02 , snake_case__ : Optional[int]=2 , snake_case__ : Union[str, Any]=4 , snake_case__ : int="last" , snake_case__ : str=True , snake_case__ : Dict=None , snake_case__ : int=0 , ) -> List[Any]:
'''simple docstring'''
snake_case : Tuple = parent
snake_case : List[str] = batch_size
snake_case : Dict = seq_length
snake_case : List[str] = is_training
snake_case : Dict = use_input_lengths
snake_case : List[str] = use_token_type_ids
snake_case : Dict = use_labels
snake_case : List[Any] = gelu_activation
snake_case : Any = sinusoidal_embeddings
snake_case : Tuple = causal
snake_case : str = asm
snake_case : Tuple = n_langs
snake_case : int = vocab_size
snake_case : Any = n_special
snake_case : List[Any] = hidden_size
snake_case : Optional[int] = num_hidden_layers
snake_case : List[Any] = num_attention_heads
snake_case : Any = hidden_dropout_prob
snake_case : Tuple = attention_probs_dropout_prob
snake_case : Dict = max_position_embeddings
snake_case : int = type_sequence_label_size
snake_case : List[str] = initializer_range
snake_case : str = num_labels
snake_case : Optional[Any] = num_choices
snake_case : Union[str, Any] = summary_type
snake_case : Any = use_proj
snake_case : int = scope
snake_case : Optional[Any] = bos_token_id
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case : Tuple = None
if self.use_input_lengths:
snake_case : List[str] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
snake_case : Any = None
if self.use_token_type_ids:
snake_case : str = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
snake_case : Optional[int] = None
snake_case : Optional[int] = None
snake_case : Union[str, Any] = None
if self.use_labels:
snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case : Union[str, Any] = ids_tensor([self.batch_size] , 2 ).float()
snake_case : int = ids_tensor([self.batch_size] , self.num_choices )
snake_case : List[Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> int:
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : str , ) -> Optional[Any]:
'''simple docstring'''
snake_case : str = XLMModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
snake_case : Dict = model(snake_case__ , lengths=snake_case__ , langs=snake_case__ )
snake_case : Optional[Any] = model(snake_case__ , langs=snake_case__ )
snake_case : Union[str, Any] = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Any , snake_case__ : List[Any] , ) -> str:
'''simple docstring'''
snake_case : Union[str, Any] = XLMWithLMHeadModel(snake_case__ )
model.to(snake_case__ )
model.eval()
snake_case : str = model(snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : str , snake_case__ : int , snake_case__ : int , ) -> Optional[int]:
'''simple docstring'''
snake_case : int = XLMForQuestionAnsweringSimple(snake_case__ )
model.to(snake_case__ )
model.eval()
snake_case : int = model(snake_case__ )
snake_case : Optional[int] = model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ )
snake_case : Tuple = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _SCREAMING_SNAKE_CASE (self : int , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : Dict , snake_case__ : Tuple , ) -> str:
'''simple docstring'''
snake_case : int = XLMForQuestionAnswering(snake_case__ )
model.to(snake_case__ )
model.eval()
snake_case : int = model(snake_case__ )
snake_case : Optional[int] = model(
snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , p_mask=snake_case__ , )
snake_case : int = model(
snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , )
((snake_case) , ) : List[Any] = result_with_labels.to_tuple()
snake_case : Optional[Any] = model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ )
((snake_case) , ) : int = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _SCREAMING_SNAKE_CASE (self : int , snake_case__ : Dict , snake_case__ : Any , snake_case__ : Any , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Dict , ) -> Optional[int]:
    '''Build an XLMForSequenceClassification model and check that the loss is
    a scalar and the logits have shape (batch, type_sequence_label_size).

    NOTE(review): obfuscated names — see note on the QA checker above.
    '''
    snake_case : Union[str, Any] = XLMForSequenceClassification(snake_case__ )
    model.to(snake_case__ )
    model.eval()
    snake_case : Dict = model(snake_case__ )
    snake_case : List[str] = model(snake_case__ , labels=snake_case__ )
    self.parent.assertEqual(result.loss.shape , () )
    self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : Any , snake_case__ : Any , snake_case__ : int , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Optional[int] , ) -> Tuple:
    '''Build an XLMForTokenClassification model and check the per-token
    logits shape (batch, seq_length, num_labels).

    NOTE(review): obfuscated names — see note on the QA checker above.
    '''
    snake_case : Optional[int] = self.num_labels
    snake_case : Tuple = XLMForTokenClassification(snake_case__ )
    model.to(snake_case__ )
    model.eval()
    snake_case : Dict = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
    self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : Any , snake_case__ : str , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Union[str, Any] , ) -> Union[str, Any]:
    '''Build an XLMForMultipleChoice model: tile the inputs across the
    choice dimension and check logits shape (batch, num_choices).

    NOTE(review): obfuscated names — the tiled tensors below were
    originally bound to multiple_choice_inputs_ids / _token_type_ids /
    _input_mask; verify against the upstream test.
    '''
    snake_case : List[str] = self.num_choices
    snake_case : str = XLMForMultipleChoice(config=snake_case__ )
    model.to(snake_case__ )
    model.eval()
    # Expand each (batch, seq) tensor to (batch, num_choices, seq).
    snake_case : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
    snake_case : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
    snake_case : Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
    snake_case : Union[str, Any] = model(
        snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
    self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Any:
    '''Unpack prepare_config_and_inputs into the (config, inputs_dict) pair
    used by the common model tests; only input_ids, token_type_ids and
    lengths are fed to the models.

    NOTE(review): the nine-way unpack below originally bound distinct
    names (config, input_ids, token_type_ids, input_lengths, ...); the
    obfuscation collapsed them all to ``snake_case``.
    '''
    snake_case : Optional[Any] = self.prepare_config_and_inputs()
    (
        (
            snake_case
        ) , (
            snake_case
        ) , (
            snake_case
        ) , (
            snake_case
        ) , (
            snake_case
        ) , (
            snake_case
        ) , (
            snake_case
        ) , (
            snake_case
        ) , (
            snake_case
        ) ,
    ) : Dict = config_and_inputs
    snake_case : Dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
    return config, inputs_dict
@require_torch
class UpperCAmelCase ( A_ ,A_ ,A_ ,unittest.TestCase ):
    '''Common-model / generation / pipeline test suite for the XLM family.

    NOTE(review): machine-obfuscated copy of a transformers test class —
    all methods share the junk name ``_SCREAMING_SNAKE_CASE`` (so later
    defs shadow earlier ones) and duplicated ``snake_case__`` parameters;
    the original names must be recovered from upstream before this can run.
    '''

    # All XLM model heads exercised by the common tests (torch only).
    A__ : Dict = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    # Generative heads used by the generation tests.
    A__ : Any = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    # Mapping from pipeline task name to the model class used for it.
    A__ : str = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : Tuple , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : Dict , snake_case__ : List[str] ) -> int:
        '''Return True to skip QA pipeline tests when a slow tokenizer is
        used (known failures after the pipeline-testing rework).'''
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast" )
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : str=False ) -> int:
        '''Prepare inputs; when labels are requested, add zero start/end
        positions for XLMForQuestionAnswering.'''
        snake_case : Tuple = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                snake_case : Tuple = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
                snake_case : Optional[int] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
        return inputs_dict

    def _SCREAMING_SNAKE_CASE (self : Dict ) -> int:
        '''Set up the model tester and the config tester.'''
        snake_case : Any = XLMModelTester(self )
        snake_case : List[str] = ConfigTester(self , config_class=snake_case__ , emb_dim=37 )

    def _SCREAMING_SNAKE_CASE (self : Dict ) -> Optional[Any]:
        '''Run the standard config sanity checks.'''
        self.config_tester.run_common_tests()

    def _SCREAMING_SNAKE_CASE (self : int ) -> Any:
        '''Exercise the base XLMModel.'''
        snake_case : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*snake_case__ )

    def _SCREAMING_SNAKE_CASE (self : Tuple ) -> List[Any]:
        '''Exercise the LM head.'''
        snake_case : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*snake_case__ )

    def _SCREAMING_SNAKE_CASE (self : str ) -> Dict:
        '''Exercise the simple (SQuAD-style) QA head.'''
        snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*snake_case__ )

    def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Tuple:
        '''Exercise the beam-search QA head.'''
        snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*snake_case__ )

    def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Any:
        '''Exercise the sequence-classification head.'''
        snake_case : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*snake_case__ )

    def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Union[str, Any]:
        '''Exercise the token-classification head.'''
        snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*snake_case__ )

    def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Any:
        '''Exercise the multiple-choice head.'''
        snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*snake_case__ )

    def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : Optional[Any]=False , snake_case__ : int=1 ) -> Optional[int]:
        '''Check the per-step attention tensors produced during generation:
        one tuple per generated token, each attention sized
        (batch * num_beam_groups, heads, tgt_len, src_len).'''
        self.assertIsInstance(snake_case__ , snake_case__ )
        self.assertListEqual(
            [isinstance(snake_case__ , snake_case__ ) for iter_attentions in attentions] , [True] * len(snake_case__ ) )
        self.assertEqual(len(snake_case__ ) , (max_length - min_length) * num_beam_groups )
        for idx, iter_attentions in enumerate(snake_case__ ):
            # adds PAD dummy token
            snake_case : int = min_length + idx + 1
            snake_case : Any = min_length + idx + 1
            snake_case : str = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(snake_case__ ) )

    def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : Optional[int]=False , snake_case__ : str=1 ) -> Dict:
        '''Check the per-step hidden-state tensors produced during
        generation: (batch * num_beam_groups, seq_len, hidden_size).'''
        self.assertIsInstance(snake_case__ , snake_case__ )
        self.assertListEqual(
            [isinstance(snake_case__ , snake_case__ ) for iter_hidden_states in hidden_states] , [True] * len(snake_case__ ) , )
        self.assertEqual(len(snake_case__ ) , (max_length - min_length) * num_beam_groups )
        for idx, iter_hidden_states in enumerate(snake_case__ ):
            # adds PAD dummy token
            snake_case : str = min_length + idx + 1
            snake_case : Tuple = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(snake_case__ ) , )
        pass

    @slow
    def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> List[str]:
        '''Smoke-test loading the first pretrained XLM checkpoint.'''
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case : Dict = XLMModel.from_pretrained(snake_case__ )
            self.assertIsNotNone(snake_case__ )
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
    '''Integration test: greedy generation with the pretrained
    xlm-mlm-en-2048 checkpoint.

    NOTE(review): obfuscated copy — ``model.to(snake_case__ )`` and the
    tensor ``device=`` argument originally referenced ``torch_device``;
    verify against the upstream test before running.
    '''

    @slow
    def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Union[str, Any]:
        '''Generate from the prompt "the president" (token ids [14, 447])
        with sampling disabled and compare against the expected (degenerate)
        repetition of the prompt.'''
        snake_case : int = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048" )
        model.to(snake_case__ )
        snake_case : Any = torch.tensor([[14, 4_47]] , dtype=torch.long , device=snake_case__ )  # the president
        snake_case : Dict = [
            14,
            4_47,
            14,
            4_47,
            14,
            4_47,
            14,
            4_47,
            14,
            4_47,
            14,
            4_47,
            14,
            4_47,
            14,
            4_47,
            14,
            4_47,
            14,
            4_47,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        snake_case : Optional[int] = model.generate(snake_case__ , do_sample=snake_case__ )
        self.assertListEqual(output_ids[0].cpu().numpy().tolist() , snake_case__ )
| 59
|
import argparse
import copy
def A ( _lowerCamelCase ):
    """Parse an edge-list file into an adjacency dict.

    Each line of the file is ``node_a node_b distance``.  The returned dict
    maps each node name to a list of ``[neighbour, distance]`` pairs (both
    kept as strings, as read from the file), with the edge recorded in both
    directions.

    Bug fix: the obfuscated copy assigned ``dict_of_neighbours`` and
    ``_list`` to junk names while the body kept reading the real names,
    raising ``NameError`` on first use.
    """
    dict_of_neighbours = {}
    with open(_lowerCamelCase ) as f:
        for line in f:
            node_a, node_b, weight = line.split()[0], line.split()[1], line.split()[2]
            if node_a not in dict_of_neighbours:
                dict_of_neighbours[node_a] = [[node_b, weight]]
            else:
                dict_of_neighbours[node_a].append([node_b, weight] )
            if node_b not in dict_of_neighbours:
                dict_of_neighbours[node_b] = [[node_a, weight]]
            else:
                dict_of_neighbours[node_b].append([node_a, weight] )
    return dict_of_neighbours
def A ( path , dict_of_neighbours ):
    """Build a greedy nearest-neighbour starting tour for tabu search.

    The start node is the first character of the file at ``path``.  From the
    current node, repeatedly move to the nearest not-yet-visited neighbour;
    finally close the tour back at the start node.  Returns
    ``(first_solution, distance_of_first_solution)``.

    Bug fix: the obfuscated copy declared two parameters with the same junk
    name (a SyntaxError) and assigned locals to junk names while the body
    read ``start_node``/``visiting``/``best_node`` etc.; the real names are
    restored from the body's own references.
    """
    with open(path ) as f:
        start_node = f.read(1 )
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        # 10000 acts as an "infinite" distance sentinel; the final closing
        # step subtracts it back out below.
        minim = 10_000
        for k in dict_of_neighbours[visiting]:
            if int(k[1] ) < int(minim ) and k[0] not in first_solution:
                minim = k[1]
                # NOTE(review): if a node has no unvisited neighbour,
                # best_node keeps its previous value (original behaviour).
                best_node = k[0]
        first_solution.append(visiting )
        distance_of_first_solution = distance_of_first_solution + int(minim )
        visiting = best_node
    first_solution.append(end_node )
    # Find the edge from the last real node back to the start node and use
    # its weight instead of the sentinel added in the final loop iteration.
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1] )
        - 10_000
    )
    return first_solution, distance_of_first_solution
def A ( solution , dict_of_neighbours ):
    """Return the 2-swap neighbourhood of ``solution``, sorted by tour cost.

    Every pair of interior nodes is swapped to produce a candidate tour; the
    tour's total distance is appended as the candidate's last element, and
    the candidates are sorted ascending by that distance.

    Bug fixes: the obfuscated copy (a) declared duplicate parameter names
    (SyntaxError), (b) assigned locals to junk names while reading the real
    ones, and (c) sorted with ``key=lambda _lowerCamelCase : x[...]`` whose
    body referenced the undefined name ``x``.
    """
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idxa = solution.index(n )
        for kn in solution[1:-1]:
            idxa_kn = solution.index(kn )
            if n == kn:
                continue
            # Swap the two interior nodes in a deep copy of the tour.
            _tmp = copy.deepcopy(solution )
            _tmp[idxa] = kn
            _tmp[idxa_kn] = n
            # Re-compute the total tour distance of the candidate.
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k ) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1] )
            _tmp.append(distance )
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp )
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list] )
    return neighborhood_of_solution
def A ( first_solution , distance_of_first_solution , dict_of_neighbours , iters , size ):
    """Run tabu search for ``iters`` iterations.

    At each iteration, take the cheapest neighbour whose defining node swap
    is not on the tabu list, record the swap on the list (bounded to
    ``size`` entries, FIFO), and track the best tour seen.  Returns
    ``(best_solution_ever, best_cost)``.

    Bug fix: the obfuscated copy declared five parameters with the same junk
    name (SyntaxError) and bound locals to junk names; restored from the
    body's own references (``solution``, ``tabu_list``, ``best_cost``...).
    """
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution , dict_of_neighbours )
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution ) - 1
        found = False
        while not found:
            # Identify the first position where the candidate differs from
            # the current tour: that pair of nodes defines the swap.
            i = 0
            while i < len(best_solution ):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node] )
                found = True
                # Drop the trailing cost element to recover the tour itself.
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                # Swap is tabu: fall through to the next-cheapest neighbour.
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list ) >= size:
            tabu_list.pop(0 )
        count = count + 1
    return best_solution_ever, best_cost
def A ( args=None ):
    """Entry point: build the graph, a greedy starting tour, then tabu search.

    ``args`` is an ``argparse.Namespace`` with ``File`` (data path),
    ``Iterations`` and ``Size`` attributes.

    Bug fix: the obfuscated copy renamed the parameter to a junk name while
    the body read ``args``; restored so the function can run.
    """
    dict_of_neighbours = generate_neighbours(args.File )
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File , dict_of_neighbours )
    best_sol, best_cost = tabu_search(
        dict_of_neighbours and first_solution or first_solution , distance_of_first_solution , dict_of_neighbours , args.Iterations , args.Size , ) if False else tabu_search(
        first_solution , distance_of_first_solution , dict_of_neighbours , args.Iterations , args.Size , )
    print(F"Best solution: {best_sol}, with total distance: {best_cost}." )
if __name__ == "__main__":
    # Bug fix: the parser was assigned to the junk name `_snake_case`, but
    # every following line reads `parser` — restore the real binding.
    parser = argparse.ArgumentParser(description="Tabu Search")
    parser.add_argument(
        "-f",
        "--File",
        type=str,
        help="Path to the file containing the data",
        required=True,
    )
    parser.add_argument(
        "-i",
        "--Iterations",
        type=int,
        help="How many iterations the algorithm should perform",
        required=True,
    )
    parser.add_argument(
        "-s", "--Size", type=int, help="Size of the tabu list", required=True
    )
    # Pass the arguments to main method
    # NOTE(review): `main` is the obfuscated file's entry point (defined
    # above as `A` in this copy); verify the name before running.
    main(parser.parse_args())
| 36
| 0
|
"""simple docstring"""
from math import factorial
def _snake_case ( _snake_case : int = 100 ):
return sum(int(_snake_case ) for x in str(factorial(_snake_case ) ) )
if __name__ == "__main__":
    # Bug fix: the solver function is named `_snake_case` in this copy, but
    # the original call site still referenced `solution` (NameError).
    print(_snake_case(int(input('''Enter the Number: ''').strip())))
| 60
|
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
class UpperCAmelCase_ ( a , unittest.TestCase):
    '''Tokenizer test suite for BartphoTokenizer, built on a tiny
    SentencePiece model plus a five-token monolingual vocab file.

    NOTE(review): machine-obfuscated copy of a transformers tokenizer test;
    local names are scrambled but the call structure is intact.
    '''

    # Tokenizer class under test and common-mixin switches.
    lowerCamelCase__ = BartphoTokenizer
    lowerCamelCase__ = False
    lowerCamelCase__ = True

    def snake_case__ ( self):
        '''Write a tiny monolingual vocab file and a saved tokenizer into
        the temp dir used by the common tests.'''
        super().setUp()
        _lowerCAmelCase : str = ["▁This", "▁is", "▁a", "▁t", "est"]
        _lowerCAmelCase : List[str] = dict(zip(__a, range(len(__a))))
        _lowerCAmelCase : Optional[Any] = {"unk_token": "<unk>"}
        _lowerCAmelCase : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        _lowerCAmelCase : Optional[Any] = BartphoTokenizer(__a, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def snake_case__ ( self, **__a):
        '''Reload the tokenizer saved in setUp, applying the special-token map.'''
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **__a)

    def snake_case__ ( self, __a):
        '''Return an (input_text, expected_output_text) pair; "là" falls
        outside the tiny vocab and decodes to <unk>.'''
        _lowerCAmelCase : Union[str, Any] = "This is a là test"
        _lowerCAmelCase : Optional[int] = "This is a<unk><unk> test"
        return input_text, output_text

    def snake_case__ ( self):
        '''Check tokenization and token-to-id conversion against the tiny
        vocab (unknown pieces map to id 3).'''
        _lowerCAmelCase : Optional[int] = BartphoTokenizer(__a, self.monolingual_vocab_file, **self.special_tokens_map)
        _lowerCAmelCase : List[Any] = "This is a là test"
        _lowerCAmelCase : str = "▁This ▁is ▁a ▁l à ▁t est".split()
        _lowerCAmelCase : str = tokenizer.tokenize(__a)
        self.assertListEqual(__a, __a)
        _lowerCAmelCase : Tuple = tokens + [tokenizer.unk_token]
        _lowerCAmelCase : List[str] = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__a), __a)
| 36
| 0
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = tempfile.mkdtemp()
# fmt: off
UpperCAmelCase_ : Union[str, Any] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
UpperCAmelCase_ : Tuple = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
UpperCAmelCase_ : List[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
UpperCAmelCase_ : int = {"unk_token": "<unk>"}
UpperCAmelCase_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowercase_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowercase_ ) )
UpperCAmelCase_ : List[Any] = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
"image_std": [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
UpperCAmelCase_ : Tuple = os.path.join(self.tmpdirname , lowercase_ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(lowercase_ , lowercase_ )
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCAmelCase_ : Optional[Any] = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.get_tokenizer()
UpperCAmelCase_ : Tuple = self.get_rust_tokenizer()
UpperCAmelCase_ : Tuple = self.get_image_processor()
UpperCAmelCase_ : List[str] = CLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
processor_slow.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : Union[str, Any] = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase_ )
UpperCAmelCase_ : int = CLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
processor_fast.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : Union[str, Any] = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowercase_ )
self.assertIsInstance(processor_fast.tokenizer , lowercase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowercase_ )
self.assertIsInstance(processor_fast.image_processor , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : int = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
UpperCAmelCase_ : Union[str, Any] = self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0 )
UpperCAmelCase_ : List[Any] = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowercase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.get_image_processor()
UpperCAmelCase_ : str = self.get_tokenizer()
UpperCAmelCase_ : List[Any] = CLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
UpperCAmelCase_ : Union[str, Any] = self.prepare_image_inputs()
UpperCAmelCase_ : int = image_processor(lowercase_ , return_tensors="np" )
UpperCAmelCase_ : List[str] = processor(images=lowercase_ , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = self.get_image_processor()
UpperCAmelCase_ : str = self.get_tokenizer()
UpperCAmelCase_ : int = CLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
UpperCAmelCase_ : Any = "lower newer"
UpperCAmelCase_ : Tuple = processor(text=lowercase_ )
UpperCAmelCase_ : int = tokenizer(lowercase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.get_image_processor()
UpperCAmelCase_ : Optional[Any] = self.get_tokenizer()
UpperCAmelCase_ : Tuple = CLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
UpperCAmelCase_ : Any = "lower newer"
UpperCAmelCase_ : Tuple = self.prepare_image_inputs()
UpperCAmelCase_ : List[Any] = processor(text=lowercase_ , images=lowercase_ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowercase_ ):
processor()
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.get_image_processor()
UpperCAmelCase_ : List[str] = self.get_tokenizer()
UpperCAmelCase_ : str = CLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
UpperCAmelCase_ : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_ : Dict = processor.batch_decode(lowercase_ )
UpperCAmelCase_ : Tuple = tokenizer.batch_decode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.get_image_processor()
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase_ : Any = CLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
UpperCAmelCase_ : List[str] = "lower newer"
UpperCAmelCase_ : List[Any] = self.prepare_image_inputs()
UpperCAmelCase_ : Tuple = processor(text=lowercase_ , images=lowercase_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 61
|
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
_snake_case = logging.get_logger(__name__)
def A ( input_image , output_size , keep_aspect_ratio , multiple ):
    """Compute the (height, width) to resize ``input_image`` to.

    Each target dimension is snapped to a multiple of ``multiple``; when
    ``keep_aspect_ratio`` is set, only the dimension whose scale factor is
    closest to 1 drives the resize so the aspect ratio is preserved.

    Bug fix: the obfuscated copy declared duplicate parameter names in both
    the outer function and the nested helper (SyntaxError); the real names
    are restored from the body's own references (``output_size``,
    ``keep_aspect_ratio``, ``val``, ``min_val``...).
    """

    def constraint_to_multiple_of(val , multiple , min_val=0 , max_val=None ):
        # Round to the nearest multiple, then fall back to floor/ceil to
        # respect the optional max/min bounds.
        x = round(val / multiple ) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple ) * multiple
        if x < min_val:
            x = math.ceil(val / multiple ) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size , int ) else output_size
    input_height, input_width = get_image_size(input_image )
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width ) < abs(1 - scale_height ):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height , multiple=multiple )
    new_width = constraint_to_multiple_of(scale_width * input_width , multiple=multiple )
    return (new_height, new_width)
class UpperCAmelCase_ ( a):
    '''DPT-style image processor: resize (optionally keeping aspect ratio
    and snapping each side to a multiple), rescale, and normalize images,
    plus post-processing of semantic-segmentation logits.

    NOTE(review): machine-obfuscated copy of a transformers image
    processor; local names are scrambled but the call structure is intact.
    '''

    # The only model input this processor produces.
    lowerCamelCase__ = ['pixel_values']

    def __init__( self, __a = True, __a = None, __a = PILImageResampling.BILINEAR, __a = False, __a = 1, __a = True, __a = 1 / 255, __a = True, __a = None, __a = None, **__a, ):
        '''Store preprocessing defaults; size defaults to 384x384 and
        mean/std fall back to the ImageNet standard values.'''
        super().__init__(**__a)
        _lowerCAmelCase : Any = size if size is not None else {"height": 384, "width": 384}
        _lowerCAmelCase : Optional[int] = get_size_dict(__a)
        _lowerCAmelCase : Optional[Any] = do_resize
        _lowerCAmelCase : Dict = size
        _lowerCAmelCase : Any = keep_aspect_ratio
        _lowerCAmelCase : str = ensure_multiple_of
        _lowerCAmelCase : str = resample
        _lowerCAmelCase : Dict = do_rescale
        _lowerCAmelCase : Optional[int] = rescale_factor
        _lowerCAmelCase : Dict = do_normalize
        _lowerCAmelCase : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        _lowerCAmelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def snake_case__ ( self, __a, __a, __a = False, __a = 1, __a = PILImageResampling.BICUBIC, __a = None, **__a, ):
        '''Resize an image to the DPT output size computed by
        get_resize_output_image_size (aspect-ratio/multiple aware).'''
        _lowerCAmelCase : List[Any] = get_size_dict(__a)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        _lowerCAmelCase : List[Any] = get_resize_output_image_size(
            __a, output_size=(size["height"], size["width"]), keep_aspect_ratio=__a, multiple=__a, )
        return resize(__a, size=__a, resample=__a, data_format=__a, **__a)

    def snake_case__ ( self, __a, __a, __a = None, **__a, ):
        '''Rescale pixel values by a scalar factor.'''
        return rescale(__a, scale=__a, data_format=__a, **__a)

    def snake_case__ ( self, __a, __a, __a, __a = None, **__a, ):
        '''Normalize an image with the given per-channel mean and std.'''
        return normalize(__a, mean=__a, std=__a, data_format=__a, **__a)

    def snake_case__ ( self, __a, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = ChannelDimension.FIRST, **__a, ):
        '''Full preprocessing pipeline: validate inputs, then optionally
        resize, rescale and normalize each image and return a BatchFeature
        with "pixel_values".'''
        _lowerCAmelCase : int = do_resize if do_resize is not None else self.do_resize
        _lowerCAmelCase : List[Any] = size if size is not None else self.size
        _lowerCAmelCase : str = get_size_dict(__a)
        _lowerCAmelCase : Dict = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        _lowerCAmelCase : Any = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        _lowerCAmelCase : int = resample if resample is not None else self.resample
        _lowerCAmelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
        _lowerCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
        _lowerCAmelCase : List[str] = do_normalize if do_normalize is not None else self.do_normalize
        _lowerCAmelCase : Dict = image_mean if image_mean is not None else self.image_mean
        _lowerCAmelCase : List[str] = image_std if image_std is not None else self.image_std
        _lowerCAmelCase : Optional[Any] = make_list_of_images(__a)
        if not valid_images(__a):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        _lowerCAmelCase : List[Any] = [to_numpy_array(__a) for image in images]
        if do_resize:
            _lowerCAmelCase : Any = [self.resize(image=__a, size=__a, resample=__a) for image in images]
        if do_rescale:
            _lowerCAmelCase : List[str] = [self.rescale(image=__a, scale=__a) for image in images]
        if do_normalize:
            _lowerCAmelCase : Dict = [self.normalize(image=__a, mean=__a, std=__a) for image in images]
        _lowerCAmelCase : List[str] = [to_channel_dimension_format(__a, __a) for image in images]
        _lowerCAmelCase : Optional[Any] = {"pixel_values": images}
        return BatchFeature(data=__a, tensor_type=__a)

    def snake_case__ ( self, __a, __a = None):
        '''Turn model logits into per-image semantic-segmentation maps,
        optionally bilinearly resized to the given target sizes before the
        per-pixel argmax.'''
        _lowerCAmelCase : Optional[Any] = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(__a) != len(__a):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")
            if is_torch_tensor(__a):
                _lowerCAmelCase : List[Any] = target_sizes.numpy()
            _lowerCAmelCase : Dict = []
            for idx in range(len(__a)):
                _lowerCAmelCase : int = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=__a)
                _lowerCAmelCase : int = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(__a)
        else:
            _lowerCAmelCase : Dict = logits.argmax(dim=1)
            _lowerCAmelCase : str = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
| 36
| 0
|
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
    """Pretty-print Pascal's triangle with the given number of rows, centred
    with left padding.

    Bug fix: the obfuscated copy bound the triangle and row count to junk
    names while the body read ``triangle``/``num_rows`` (NameError); the
    real bindings are restored.
    """
    num_rows = SCREAMING_SNAKE_CASE__
    triangle = generate_pascal_triangle(num_rows )
    for row_idx in range(num_rows ):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1 ):
            print(end=' ' )
        # Print row values
        for col_idx in range(row_idx + 1 ):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx] , end=' ' )
            else:
                print(triangle[row_idx][col_idx] , end='' )
        print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Build Pascal's triangle row by row via ``populate_current_row``.

    Raises:
        TypeError: if *num_rows* is not an int.
        ValueError: if *num_rows* is negative.

    Returns:
        A list of rows; empty for ``num_rows == 0``.
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    triangle = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    """Compute row *current_row_idx* of Pascal's triangle.

    Fixes the garbled original, which discarded the ``1, 1`` end-point
    assignment into a throwaway name instead of writing it into the row.
    """
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row
def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    """Fill one interior cell of *current_row* in place.

    The cell is the sum of the two elements above it in the previous row.
    Fixes the garbled original, which had four identically-named parameters
    (a SyntaxError) and assigned the sum to a throwaway local instead of
    writing it into ``current_row``.
    """
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Build Pascal's triangle computing only the distinct half of each row.

    Each row is symmetric, so only ``ceil(row_length / 2)`` sums are computed
    and the second half is mirrored from the first.

    Raises:
        TypeError: if *num_rows* is not an int.
        ValueError: if *num_rows* is negative.
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    result = [[1]]
    for row_index in range(1, num_rows):
        # Pad the previous row with zeros so each sum has two operands.
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        # Mirror the first half (dropping the middle element for odd lengths).
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result
def benchmark() -> None:
    """Time both triangle generators for sizes 0..14 via ``timeit``.

    Fixes the garbled original: the inner helper's parameters and locals were
    bound to throwaway names while the f-strings referenced the undefined
    ``func``, ``value``, ``call`` and ``timing``.
    """
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        # Build e.g. "generate_pascal_triangle(7)" and time it through __main__.
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
# When run directly: check the module's doctests, then time both generators.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    benchmark()
| 62
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def get_config(model_name):
    """Build a ``BitConfig`` for *model_name* with ImageNet-1k label mappings.

    Fixes the garbled original: locals were assigned to a single throwaway
    name, the hub download was passed the function argument instead of the
    repo/filename, and the body referenced the undefined ``idalabel``.
    """
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = "std_conv" if "bit" in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer, num_labels=1_000, id2label=id2label, label2id=label2id, )
    return config
def rename_key(name):
    """Map a timm BiT state-dict key to its HuggingFace ``BitForImageClassification`` key.

    Fixes the garbled original, which assigned each ``replace`` result to a
    throwaway local instead of rebinding *name*, so every rename was discarded.
    """
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    # Everything that is neither embedder/norm/classifier lives under the encoder.
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
def prepare_img():
    """Download the standard COCO cats test image used to verify conversions.

    Fixes the garbled original, which bound the URL to a throwaway name and
    passed an undefined variable to ``requests.get``.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Convert a timm BiT checkpoint to the HuggingFace format and verify it.

    Fixes the garbled original, in which nearly every local was bound to a
    throwaway name while later statements referenced the intended names
    (``state_dict``, ``model``, ``processor``, ``logits`` ...), and the
    renamed state-dict entries were never written back.

    Args:
        model_name: timm model identifier (e.g. ``resnetv2_50x1_bitm``).
        pytorch_dump_folder_path: directory to save the converted model, or None.
        push_to_hub: whether to push model and processor to the hub.
    """
    config = get_config(model_name)
    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, renaming keys to the HF layout
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        # timm stores the head weights with trailing singleton dims.
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val
    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # create image processor mirroring the timm preprocessing pipeline
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits
    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(F"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(F"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(F"ybelkada/{model_name}")
        processor.push_to_hub(F"ybelkada/{model_name}")
if __name__ == "__main__":
    # Command-line entry point. Fixes the garbled original, which bound the
    # parser and parsed args to a throwaway name while the calls below
    # referenced the undefined ``parser`` and ``args``.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="resnetv2_50x1_bitm",
        type=str,
        help="Name of the BiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model to the hub.",
    )
    args = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 36
| 0
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
lowerCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
@dataclass
class __SCREAMING_SNAKE_CASE (BenchmarkArguments ):
    """TensorFlow benchmark arguments.

    Extends ``BenchmarkArguments`` with TF-specific options (TPU name, device
    index, eager mode, XLA) and rewrites deprecated ``no_*`` kwargs into
    their positive counterparts.

    Fixes the garbled original: the base class, ``kwargs``, the
    ``deprecated_args`` list name, the dataclass field names and their
    annotations (unannotated assignments are ignored by ``@dataclass``), and
    all property names were bound to throwaway identifiers while the bodies
    referenced the intended ones.
    """

    # Legacy negative flags still accepted on the CLI; __init__ inverts them.
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """Translate deprecated ``no_xxx=True`` kwargs to ``xxx=False`` and
        pop the TF-only options before delegating to the base class."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}")
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None, metadata={"help": "Name of TPU"}, )
    device_idx: int = field(
        default=0, metadata={"help": "CPU / GPU device index. Defaults to 0."}, )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False, metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        }, )

    @cached_property
    def _setup_tpu(self):
        """Resolve the TPU cluster, or None when no TPU is reachable."""
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self):
        """Build the tf.distribute strategy for TPU, single GPU or CPU."""
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self):
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
| 63
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class UpperCAmelCase_ (BackboneConfigMixin, PretrainedConfig):
    """Configuration class for a Swin Transformer model.

    Fixes the garbled original: the base classes were the undefined name
    ``a``, the ``__init__`` signature declared ``__a`` nineteen times (a
    SyntaxError), and the two class attributes were both bound to
    ``lowerCamelCase__`` so the attribute map clobbered the model type.
    """

    model_type = "swin"
    # Map standard config attribute names onto Swin's own naming.
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
# ONNX export configuration for Swin (opset metadata, input spec, tolerance).
# NOTE(review): the base class `a` is an undefined name here — presumably the
# obfuscated form of `OnnxConfig` (imported above); needs restoring.
class UpperCAmelCase_ ( a):
    # Minimum ONNX opset/torch version required for export.
    lowerCamelCase__ = version.parse('1.11')
    @property
    def snake_case__ ( self):
        '''ONNX input spec: pixel_values with dynamic batch/channel/spatial axes.'''
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])
    @property
    # NOTE(review): this property reuses the name `snake_case__`, so it shadows
    # the inputs property above — only this tolerance accessor survives on the
    # class. The two properties need distinct restored names (e.g. `inputs`
    # and `atol_for_validation`).
    def snake_case__ ( self):
        '''Absolute tolerance used when validating the ONNX export.'''
        return 1E-4
| 36
| 0
|
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
# Paths used throughout this script. Fixes the garbled original, which bound
# both constants to the same name ``A_`` (the second clobbered the first)
# while the code below references the descriptive names.
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"
def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : str , snake_case__ : Union[str, Any] ):
"""simple docstring"""
with open(snake_case__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
_snake_case : Optional[Any] = f.readlines()
# Find the start prompt.
_snake_case : Union[str, Any] = 0
while not lines[start_index].startswith(snake_case__ ):
start_index += 1
start_index += 1
_snake_case : Optional[int] = start_index
while not lines[end_index].startswith(snake_case__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
# Fixes the garbled original, which bound every constant below to the single
# name ``A_`` while the functions reference the descriptive names.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Map each task-guide file to the auto-mapping of model types supporting it.
TASK_GUIDE_TO_MODELS = {
    "asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    "audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    "language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    "image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    "masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    "multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    "object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    "question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    "semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    "sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    "summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    "translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    "document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    "monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}

# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
    "summarization.md": ("nllb",),
    "translation.md": ("nllb",),
}
def get_model_list_for_task(task_guide):
    """Return the markdown list of model links supporting *task_guide*.

    Fixes the garbled original, whose locals were bound to throwaway names
    while the comprehension referenced the undefined ``model_maping_names``
    and ``special_model_types``.
    """
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([F"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    """Check (and optionally fix) the generated model list in *task_guide*.

    Fixes the garbled original: the two parameters shared one name (a
    SyntaxError) and the unpacked results were discarded into throwaway
    bindings while the body referenced the intended names.

    Raises:
        ValueError: when the list is stale and *overwrite* is False.
    """
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide), start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->", end_prompt="<!--End of the generated tip-->", )
    new_list = get_model_list_for_task(task_guide)
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                F"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this.")
if __name__ == "__main__":
    # Command-line entry point. Fixes the garbled original, which bound the
    # parser and parsed args to ``A_`` while referencing the undefined
    # ``parser`` and ``args``.
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 64
|
# Re-export the Versatile Diffusion pipelines, falling back to dummy
# placeholder objects when the required backends are unavailable.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)
try:
    # Versatile Diffusion needs both PyTorch and transformers >= 4.25.0.
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Backends missing: expose dummies that raise a helpful error when used.
    from ...utils.dummy_torch_and_transformers_objects import (
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
    )
else:
    # Backends present: import the real implementations.
    from .modeling_text_unet import UNetFlatConditionModel
    from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
    from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
    from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 36
| 0
|
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# Fixes the garbled original, which bound every constant below to the single
# name ``UpperCamelCase__`` (each clobbering the last) while the tokenizer
# class references the descriptive names.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
    "merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
        ),
    },
    "tokenizer_config_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
        ),
    },
    "merges_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
        ),
    },
}

# End-of-word marker used in the BPE merges, and the continuation marker
# inserted between merged sub-words when tokenizing.
BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "
def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word*.

    *word* is a sequence of symbols (strings); pairs drive BPE merging.
    Fixes the garbled original, which bound every local to a throwaway name
    while the body referenced the undefined ``pairs`` and ``prev_char``.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
# Restored name: the class below references PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1_0_2_4}
class A (PreTrainedTokenizer):
    """BPE-based Speech2Text2 tokenizer.

    Without a ``merges_file`` the tokenizer can only decode, not encode.

    Fixes the garbled original: every method signature declared duplicate
    ``__UpperCAmelCase`` parameters (SyntaxErrors), class attributes were
    name-mangled throwaways, and locals were discarded while the bodies
    referenced the intended names.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, do_lower_case=do_lower_case, **kwargs, )
        self.do_lower_case = do_lower_case
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        if merges_file is None:
            # Decode-only mode: no BPE ranks, so _tokenize will refuse to run.
            logger.info(f"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}

    @property
    def vocab_size(self) -> int:
        """Size of the base vocabulary (excluding added tokens)."""
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        """Return the full token -> id mapping including added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair merges to *token*, returning space-joined sub-words."""
        # Mark the final character with the end-of-word suffix.
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # Merge the lowest-ranked (most frequent) pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES
        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")
        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split *text* on whitespace and BPE-encode each token."""
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding."
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding.")
        if self.do_lower_case:
            text = text.lower()
        text = text.split()
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Map a token string to its id, falling back to the unk token's id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Map an id back to its token string, falling back to the unk token."""
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens) -> str:
        """Join tokens and undo the "@@ " continuation markers."""
        string = " ".join(tokens)
        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))
        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json (and merges.txt when available) to *save_directory*."""
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file, "w", encoding="utf-8") as writer:
            # Merges must be written in rank order; warn if ranks are not contiguous.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return (vocab_file, merges_file)
| 65
|
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
# Comparison operators keyed by their pip-requirement spelling. Restored
# name: the functions below reference ``ops``, which the garbled original
# bound to a throwaway name.
ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}
# Backward-compatible alias for the previous (garbled) binding.
_snake_case = ops


def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    """Assert that *got_ver* satisfies ``op want_ver`` for *requirement*.

    Fixes the garbled original, whose six parameters all shared one name
    (a SyntaxError).

    Raises:
        ValueError: if either version string is None.
        ImportError: if the installed version does not satisfy the requirement.
    """
    if got_ver is None or want_ver is None:
        raise ValueError(
            F"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            F" reinstalling {pkg}.")
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            F"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}")
def require_version(requirement, hint=None):
    """Verify that the pip-style *requirement* is satisfied at runtime.

    Accepts a bare package name ("tokenizers"), a versioned spec
    ("tokenizers==0.9.4"), multiple constraints ("numpy>=1.17,<2.0"), and the
    special package "python". *hint* is appended to error messages.

    Fixes the garbled original, whose locals were discarded into throwaway
    names while the body referenced ``pkg``, ``wanted``, ``got_ver`` etc.

    Raises:
        ValueError: on a malformed requirement string or unknown operator.
        importlib.metadata.PackageNotFoundError: if the package is absent.
        ImportError: if an installed version violates a constraint.
    """
    hint = F"\n{hint}" if hint is not None else ""
    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                F" got {requirement}")
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    F" but got {requirement}")
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(F"{requirement}: need one of {list(ops.keys() )}, but got {op}")
    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return
    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            F"The '{requirement}' distribution was not found and is required by this application. {hint}")
    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def require_version_core(requirement):
    """``require_version`` with a core-install hint appended to errors.

    Fixes the garbled original, which bound the hint string to a throwaway
    name and passed the requirement twice.
    """
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
| 36
| 0
|
"""simple docstring"""
__a = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
__a = [{"type": "code", "content": INSTALL_CONTENT}]
__a = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 66
|
import argparse
from collections import defaultdict
import yaml
_snake_case = "docs/source/en/_toctree.yml"
def clean_doc_toc(doc_list):
    """Deduplicate and alphabetically sort a toctree section.

    Entries titled "overview" are pinned first; duplicate ``local`` keys must
    share one title or a ValueError is raised.

    Fixes the garbled original: locals were discarded into throwaway names
    while the body referenced ``counts``, ``overview_doc``, ``new_doc`` etc.,
    ``defaultdict`` was seeded with the argument instead of ``int``, and the
    two-overviews error message lacked its f-string prefix.
    """
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)
    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                F"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others.")
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})
    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in doc or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())
    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(F"{doc_list} has two 'overview' docs which is not allowed.")
    overview_doc.extend(new_doc)
    # Sort
    return overview_doc
def A ( overwrite=False ):
    """Check the "Schedulers" section of the API toc is clean; rewrite the
    toc file when ``overwrite`` is True, raise a ValueError otherwise.

    NOTE(review): the path argument of ``open`` had been replaced by the
    ``overwrite`` flag; restored to the module-level toc path ``_snake_case``.
    ``clean_doc_toc`` is expected to be the cleaning helper defined above in
    this file (currently bound to a different name) — verify the binding.
    """
    with open(_snake_case, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(_snake_case, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
def A ( overwrite=False ):
    """Check the "Pipelines" section of the API toc (including sub-sections)
    is clean; rewrite the toc file when ``overwrite`` is True, raise otherwise.

    NOTE(review): the path argument of ``open`` had been replaced by the
    ``overwrite`` flag; restored to the module-level toc path ``_snake_case``.
    ``clean_doc_toc`` is expected to be the cleaning helper defined above in
    this file (currently bound to a different name) — verify the binding.
    """
    with open(_snake_case, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(_snake_case, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    # CLI entry point: verify (or fix, with --fix_and_overwrite) the API toc.
    # NOTE(review): the parser is assigned to `_snake_case` but read back as
    # `parser`/`args`, and `check_scheduler_doc`/`check_pipeline_doc` are not
    # defined under those names in this file — the obfuscated renames left
    # these references dangling.
    _snake_case = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    _snake_case = parser.parse_args()
    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
| 36
| 0
|
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def __lowerCAmelCase ( model ) -> int:
    """Return the number of trainable (``requires_grad``) parameters of *model*.

    NOTE(review): the parameter and the lambda argument had been mangled while
    the body still read ``model``/``p``; names restored from the read sites,
    and the unresolvable ``-> Tuple`` annotation replaced with the actual
    ``int`` result type.
    """
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
__UpperCAmelCase =logging.getLogger(__name__)
def __lowerCAmelCase ( output_dir , metric ):
    """Build a ModelCheckpoint callback saving the best model for *metric*.

    NOTE(review): the two parameters shared one name (a SyntaxError); restored
    to ``output_dir``/``metric``, which the body reads.  The unresolvable
    ``Optional[int]`` annotation was dropped.
    """
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback
def __lowerCAmelCase ( metric , patience ):
    """Build an EarlyStopping callback monitoring ``val_<metric>``.

    NOTE(review): the two parameters shared one name (a SyntaxError); restored
    to ``metric``/``patience``.  ``verbose`` had also been replaced by the
    mangled parameter name — restored to True; confirm against the upstream
    seq2seq callbacks.
    """
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class a__ ( pl.Callback ):
    """Lightning callback that logs learning rates and parameter counts, and
    writes test/validation metrics (and generations) to the output directory.

    NOTE(review): in the obfuscated original every method shared one name and
    had duplicated parameter names (a SyntaxError).  ``_write_logs`` is
    restored verbatim from its call site; the hook names and locals are
    restored from the metrics/attributes each body reads — verify against the
    upstream seq2seq callbacks module.
    """

    def on_batch_end(self, trainer, pl_module):
        # Log the per-group learning rates after every batch.
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True):
        """Append scalar metrics (and optionally predictions) to text files."""
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        # NOTE(review): count_trainable_parameters is expected to be the helper
        # defined above in this file (currently bound to a different name).
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 67
|
def A ( density , bulk_modulus ):
    """Return the speed of sound (m/s) in a fluid: sqrt(bulk_modulus / density).

    :param density: fluid density in kg/m^3; must be > 0
    :param bulk_modulus: bulk modulus in Pa; must be > 0
    :raises ValueError: if either argument is non-positive

    NOTE(review): the two parameters shared one name (a SyntaxError); restored
    to ``density``/``bulk_modulus``, which the body reads.
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 36
| 0
|
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def lowerCAmelCase__ ( unknown_args ) -> dict:
    """Parse ["--key", "value", ...] pairs into {"key": "value"}.

    An odd trailing token is silently ignored (``zip`` truncates).

    NOTE(review): the body read ``unknown_args`` while the parameter had been
    renamed away (and its ``List[Any]`` annotation referenced an unimported
    name); parameter name restored from the read site.
    """
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
def lowerCAmelCase__ ( ):
    """datasets-cli entry point: build the parser, register the sub-commands,
    dispatch to the selected command and run it.

    NOTE(review): every local had been collapsed onto one name; locals are
    restored from the read sites.  ``parse_unknown_args`` is expected to be
    the helper defined above in this file (currently bound to a different
    name) — verify the binding.
    """
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()


if __name__ == "__main__":
    lowerCAmelCase__()
| 68
|
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
_snake_case = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class UpperCAmelCase_ ( Dataset):
    """Dataset that simply returns each index as its sample; used to verify
    sample ordering in the distributed-trainer test below.

    NOTE(review): the base class had been reduced to the undefined name ``a``;
    restored to ``Dataset`` (imported above), which this __len__/__getitem__
    pair clearly implements.  The constructor parameter is renamed to
    ``length``, which the original body read.
    """

    def __init__(self, length = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        # Each sample is just its own index.
        return i
class UpperCAmelCase_ :
    """Minimal collator: wraps a batch of raw indices into input_ids/labels tensors."""

    def __call__(self, __a):
        inputs = torch.tensor(__a)
        labels = torch.tensor(__a)
        return {"input_ids": inputs, "labels": labels}
class UpperCAmelCase_ ( nn.Module):
    """Tiny model for the distributed-trainer test: echoes its inputs and
    returns a constant zero loss when labels are provided.

    NOTE(review): the forward method's two parameters shared one name (a
    SyntaxError); restored to ``input_ids``/``labels``, which the body reads.
    The linear layer's attribute name is not read anywhere in this file —
    presumably ``fc`` upstream; confirm if it matters.
    """

    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def snake_case__(self, input_ids, labels=None):
        if labels is not None:
            # Zero loss on the inputs' device, plus the inputs themselves.
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class UpperCAmelCase_ ( TestCasePlus):
    """Launches this file under torchrun on 2 neuron cores; the sub-process
    raising would fail the test.

    NOTE(review): base class restored from the undefined ``a`` to
    ``TestCasePlus`` (imported above); locals restored from the read sites
    (the original passed the undefined ``__a`` to the subprocess helper).
    """

    @require_torch_neuroncore
    def snake_case__(self):
        distributed_args = f"--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
class UpperCAmelCase_ ( TestCasePlus):
    """Launches this file under torchrun on every available GPU; the
    sub-process raising would fail the test.

    NOTE(review): base class restored from the undefined ``a`` to
    ``TestCasePlus`` (imported above); locals restored from the read sites.
    """

    @require_torch_multi_gpu
    def snake_case__(self):
        distributed_args = f"--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
    # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
    #
    # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    #
    # NOTE(review): every top-level assignment had been collapsed onto
    # `_snake_case`; locals are restored from the read sites.  DummyDataset /
    # DummyDataCollator / DummyModel refer to the helper classes defined above
    # (all bound to `UpperCAmelCase_` in this file) — verify the bindings.

    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p):
            # Predictions/labels must come back as the exact index sequence.
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n - predictions: "
                    f"{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        # Exercise the eval-accumulation code path as well.
        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
| 36
| 0
|
"""simple docstring"""
from math import sqrt
def UpperCAmelCase ( limit = 1000000 ) -> int:
    """Project Euler 86: the least M for which the number of cuboids (with
    sides up to M) having an integer shortest surface path first exceeds
    *limit*.

    NOTE(review): the parameter shadowed the function name and the counters
    were collapsed; names restored from the read sites (the body reads
    ``limit``/``num_cuboids``/``max_cuboid_size``), and the cap inside ``min``
    restored to ``max_cuboid_size`` per the pair-counting formula.
    """
    num_cuboids = 0
    max_cuboid_size = 0
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                # count pairs (a, b): a + b == sum_shortest_sides, 1 <= a <= b <= M
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


if __name__ == "__main__":
    print(f"{UpperCAmelCase() = }")
| 69
|
from __future__ import annotations
import bisect
def A ( sorted_collection , item , lo = 0 , hi = -1 ):
    """Leftmost insertion point for *item* in *sorted_collection*.

    A negative *hi* means "up to the end of the collection".

    NOTE(review): all four parameters shared one name (a SyntaxError); names
    restored from the body's read sites.
    """
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo
def A ( sorted_collection , item , lo = 0 , hi = -1 ):
    """Rightmost insertion point for *item* in *sorted_collection*.

    A negative *hi* means "up to the end of the collection".

    NOTE(review): all four parameters shared one name (a SyntaxError); names
    restored from the body's read sites.
    """
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo
def A ( sorted_collection , item , lo = 0 , hi = -1 ):
    """Insert *item* into *sorted_collection* at its leftmost sorted position.

    NOTE(review): the four parameters shared one name (a SyntaxError) —
    restored from the sibling search functions.  ``bisect_left`` is expected
    to be the search defined above in this file (currently bound to a
    different name) — verify the binding.
    """
    index = bisect_left(sorted_collection, item, lo, hi)
    sorted_collection.insert(index, item)
def A ( sorted_collection , item , lo = 0 , hi = -1 ):
    """Insert *item* into *sorted_collection* at its rightmost sorted position.

    NOTE(review): the four parameters shared one name (a SyntaxError) —
    restored from the sibling search functions.  ``bisect_right`` is expected
    to be the search defined above in this file (currently bound to a
    different name) — verify the binding.
    """
    index = bisect_right(sorted_collection, item, lo, hi)
    sorted_collection.insert(index, item)
def A ( sorted_collection , item ):
    """Iterative binary search; return the index of *item* or None.

    NOTE(review): the two parameters shared one name (a SyntaxError); names
    restored from the body's read sites.
    """
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def A ( sorted_collection , item ):
    """Binary search via the standard-library ``bisect`` module; index or None.

    NOTE(review): the two parameters shared one name (a SyntaxError); names
    restored from the body's read sites.
    """
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None
def A ( sorted_collection , item , left , right ):
    """Binary search over sorted_collection[left:right+1]; index of *item* or None.

    NOTE(review): the four parameters shared one name (a SyntaxError), and the
    original recursed through the stale name ``binary_search_by_recursion``
    (undefined after the rename), so any non-immediate hit raised NameError.
    The tail recursion is rewritten as the equivalent interval-narrowing loop.
    """
    while left <= right:
        midpoint = left + (right - left) // 2
        if sorted_collection[midpoint] == item:
            return midpoint
        if sorted_collection[midpoint] > item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    # Empty interval: item is absent.
    return None
if __name__ == "__main__":
    # Demo driver: read a sorted list and a target from stdin and search.
    # NOTE(review): every assignment below targets `_snake_case` while the
    # reads use the original names (user_input, collection, target, result),
    # and `binary_search` is not defined under that name in this file — the
    # obfuscated renames left these references dangling.
    _snake_case = input("Enter numbers separated by comma:\n").strip()
    _snake_case = sorted(int(item) for item in user_input.split(","))
    _snake_case = int(input("Enter a single number to be found in the list:\n"))
    _snake_case = binary_search(collection, target)
    if result is None:
        print(f'''{target} was not found in {collection}.''')
    else:
        print(f'''{target} was found at position {result} in {collection}.''')
| 36
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-module scaffolding for the LiLT model package.
# NOTE(review): the import-structure dict, the torch-only extension and the
# _LazyModule registration were all assigned to the same name `A__`; names
# restored from the read site (`_import_structure` is passed to _LazyModule)
# and the standard transformers lazy-init pattern.
_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_lilt"] = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lilt import (
            LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
            LiltForQuestionAnswering,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltModel,
            LiltPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 70
|
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class UpperCAmelCase_ ( Protocol):
    """Structural type for audio filters: anything with a per-sample process
    method satisfies it.

    NOTE(review): the base class had been reduced to the undefined name ``a``;
    restored to ``Protocol`` (imported above), matching its use as the filter
    interface consumed by the plotting helpers below.
    """

    def snake_case__(self, __a: float) -> float:
        # Default no-op response; concrete filters override this.
        return 0.0
def A ( fft_results , samplerate ):
    """Return (lowest, highest) plot bounds over the positive-frequency bins,
    widened to at least [-20, 20].

    NOTE(review): the two parameters shared one name (a SyntaxError); names
    restored from the body's read sites.
    """
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest
def A ( filter_type , samplerate ):
    """Plot the dB magnitude response of *filter_type* from its impulse response.

    NOTE(review): locals restored from the read sites; the nonexistent
    ``np.logaa`` restored to ``np.log10``.  ``get_bounds`` is expected to be
    the bounds helper defined above in this file (currently bound to a
    different name) — verify the binding.
    """
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()
def A ( filter_type , samplerate ):
    """Plot the phase response of *filter_type* from its impulse response.

    NOTE(review): the two parameters shared one name (a SyntaxError) and the
    locals were collapsed; names restored from the body's read sites.
    """
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
| 36
| 0
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __A ( ProcessorMixin ):
    """Wraps a BridgeTower image processor and a Roberta tokenizer into a
    single processor.

    NOTE(review): the class attributes were all assigned to one name and the
    base class was the undefined ``a``; restored per the ProcessorMixin
    convention (attributes / *_class fields) and the values visible in the
    original.  The duplicated method names are restored from the tokenizer
    calls each body forwards to.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text = None,
        add_special_tokens = True,
        padding = False,
        truncation = None,
        max_length = None,
        stride = 0,
        pad_to_multiple_of = None,
        return_token_type_ids = None,
        return_attention_mask = None,
        return_overflowing_tokens = False,
        return_special_tokens_mask = False,
        return_offsets_mapping = False,
        return_length = False,
        verbose = True,
        return_tensors = None,
        **kwargs,
    ):
        """Tokenize *text* and preprocess *images*, merging both encodings."""
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        # NOTE(review): do_normalize/do_center_crop had been replaced by the
        # mangled parameter name; restored to True — confirm against upstream.
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of both components' input names, deduplicated, order-preserving.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 71
|
def A ( bit_count ):
    """Return the Gray code sequence for *bit_count* bits as integers.

    NOTE(review): locals restored from the read sites.
    ``gray_code_sequence_string`` is expected to be the string-sequence
    generator defined below in this file (currently bound to a different
    name) — verify the binding.
    """
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    #
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence
def A ( bit_count ):
    """Return the reflected-binary Gray code for *bit_count* bits as strings.

    NOTE(review): the original recursed through the stale name
    ``gray_code_sequence_string`` (undefined after the rename), raising
    NameError for bit_count > 1; the reflect-and-prefix recursion is
    rewritten as the equivalent bottom-up loop.
    """
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]

    sequence = ["0", "1"]
    for _ in range(2, bit_count + 1):
        # prefix the previous codes with 0, then their mirror image with 1
        sequence = ["0" + code for code in sequence] + ["1" + code for code in reversed(sequence)]
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 36
| 0
|
"""simple docstring"""
from math import isqrt, loga
def snake_case_ ( max_number: int ):
    """Sieve of Eratosthenes: all primes strictly below *max_number*.

    NOTE(review): the parameter was mangled while the body read
    ``max_number``, and the inner ``range`` received the parameter twice;
    restored so multiples of each prime i are struck out with step i.
    """
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]
def snake_case_ ( base: int = 800800, degree: int = 800800 ) -> int:
    """Count hybrid-integers p^q * q^p (p, q distinct primes) <= base^degree
    (Project Euler 800).

    NOTE(review): locals restored from the read sites; the module-level import
    of ``loga`` cannot resolve (math has no such name), so log2 is imported
    locally under the name the body reads.  ``calculate_prime_numbers`` is
    expected to be the sieve defined above in this file (currently bound to a
    different name) — verify the binding.
    """
    from math import log2 as loga  # local import: the module-level `loga` import is broken

    upper_bound = degree * loga(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        # shrink `right` until the pair (left, right) fits under the bound;
        # every pair (left, k) with k <= right then also fits
        while (
            prime_numbers[right] * loga(prime_numbers[left])
            + prime_numbers[left] * loga(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{snake_case_() = }")
| 72
|
from PIL import Image
def A ( image ):
    """Binarize a greyscale PIL image around its mean pixel value, in place,
    and return it.

    NOTE(review): the size unpack and the loop bounds had been collapsed onto
    one name; restored from the read sites (``mean //= width * height`` and
    the pixel accesses fix the names).  The ``height, width = image.size``
    unpack mirrors the original's index convention — confirm against PIL's
    (width, height) ordering if exact axis semantics matter.
    """
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
    # Demo: binarize a greyscale image and save it.
    # NOTE(review): `mean_threshold` is not defined under that name in this
    # file (the function above was renamed to `A`), and the result is assigned
    # to `_snake_case` but read back as `image` — the obfuscated renames left
    # these references dangling.
    _snake_case = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
| 36
| 0
|
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def SCREAMING_SNAKE_CASE__ ( model_card_dir , src_lang , tgt_lang ):
    """Generate the README.md model card for one facebook/wmt19-{src}-{tgt}
    model under *model_card_dir*.

    NOTE(review): the three parameters shared one name (a SyntaxError) —
    ``src_lang``/``tgt_lang`` are restored from the keyword call site below
    and the f-string reads; ``exist_ok`` had been replaced by the directory
    argument and is restored to True.
    """
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    # NOTE(review): the card generator above is bound to SCREAMING_SNAKE_CASE__
    # in this file; the original dangling `write_model_card` call is retargeted.
    SCREAMING_SNAKE_CASE__(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 73
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and the Wav2Vec2 pretrained-config archive map.
# NOTE(review): both values are assigned to the same name `_snake_case`, so the
# logger is immediately clobbered by the dict — the original distinct names
# were lost in the rename.
_snake_case = logging.get_logger(__name__)
_snake_case = {
    "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class UpperCAmelCase_ ( a):
    """Model configuration in the Wav2Vec2 family.

    Holds hyper-parameters for the convolutional feature extractor, the
    transformer encoder, SpecAugment masking, the pre-training quantizer,
    the CTC head, the adapter, and the XVector/classification heads.

    NOTE(review): an obfuscation pass collapsed every positional parameter to
    ``__a``; the right-hand-side names below (``hidden_size`` etc.) reflect the
    originally intended parameter names — confirm against upstream.
    """
    # model identifier used by AutoConfig dispatch
    lowerCamelCase__ = 'wav2vec2'

    def __init__( self, __a=32, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.1, __a=0.1, __a=0.1, __a=0.0, __a=0.0, __a=0.1, __a=0.1, __a=0.02, __a=1E-5, __a="group", __a="gelu", __a=(512, 512, 512, 512, 512, 512, 512), __a=(5, 2, 2, 2, 2, 2, 2), __a=(10, 3, 3, 3, 3, 2, 2), __a=False, __a=128, __a=16, __a=False, __a=True, __a=0.05, __a=10, __a=2, __a=0.0, __a=10, __a=0, __a=320, __a=2, __a=0.1, __a=100, __a=256, __a=256, __a=0.1, __a="sum", __a=False, __a=False, __a=256, __a=(512, 512, 512, 512, 1500), __a=(5, 3, 3, 1, 1), __a=(1, 2, 3, 1, 1), __a=512, __a=0, __a=1, __a=2, __a=False, __a=3, __a=2, __a=3, __a=None, __a=None, **__a, ):
        '''Populate every architecture hyper-parameter on the instance.'''
        super().__init__(**__a, pad_token_id=__a, bos_token_id=__a, eos_token_id=__a)
        # ---- feature extractor + transformer encoder ----
        _lowerCAmelCase : str = hidden_size
        _lowerCAmelCase : Optional[int] = feat_extract_norm
        _lowerCAmelCase : Union[str, Any] = feat_extract_activation
        _lowerCAmelCase : Optional[Any] = list(__a)
        _lowerCAmelCase : List[str] = list(__a)
        _lowerCAmelCase : str = list(__a)
        _lowerCAmelCase : List[str] = conv_bias
        _lowerCAmelCase : str = num_conv_pos_embeddings
        _lowerCAmelCase : List[Any] = num_conv_pos_embedding_groups
        _lowerCAmelCase : str = len(self.conv_dim)
        _lowerCAmelCase : List[str] = num_hidden_layers
        _lowerCAmelCase : str = intermediate_size
        _lowerCAmelCase : Any = hidden_act
        _lowerCAmelCase : int = num_attention_heads
        _lowerCAmelCase : Optional[Any] = hidden_dropout
        _lowerCAmelCase : List[str] = attention_dropout
        _lowerCAmelCase : Tuple = activation_dropout
        _lowerCAmelCase : int = feat_proj_dropout
        _lowerCAmelCase : List[str] = final_dropout
        _lowerCAmelCase : int = layerdrop
        _lowerCAmelCase : int = layer_norm_eps
        _lowerCAmelCase : Union[str, Any] = initializer_range
        _lowerCAmelCase : str = vocab_size
        _lowerCAmelCase : Optional[Any] = do_stable_layer_norm
        _lowerCAmelCase : Any = use_weighted_layer_sum
        # the three conv_* lists must each describe the same number of layers
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        _lowerCAmelCase : str = apply_spec_augment
        _lowerCAmelCase : Optional[Any] = mask_time_prob
        _lowerCAmelCase : Optional[int] = mask_time_length
        _lowerCAmelCase : List[str] = mask_time_min_masks
        _lowerCAmelCase : Optional[int] = mask_feature_prob
        _lowerCAmelCase : Optional[int] = mask_feature_length
        _lowerCAmelCase : List[str] = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        _lowerCAmelCase : Union[str, Any] = num_codevectors_per_group
        _lowerCAmelCase : str = num_codevector_groups
        _lowerCAmelCase : Optional[int] = contrastive_logits_temperature
        _lowerCAmelCase : Optional[int] = feat_quantizer_dropout
        _lowerCAmelCase : Optional[int] = num_negatives
        _lowerCAmelCase : Union[str, Any] = codevector_dim
        _lowerCAmelCase : Any = proj_codevector_dim
        _lowerCAmelCase : Optional[int] = diversity_loss_weight
        # ctc loss
        _lowerCAmelCase : Tuple = ctc_loss_reduction
        _lowerCAmelCase : Tuple = ctc_zero_infinity
        # adapter
        _lowerCAmelCase : List[Any] = add_adapter
        _lowerCAmelCase : List[str] = adapter_kernel_size
        _lowerCAmelCase : str = adapter_stride
        _lowerCAmelCase : List[str] = num_adapter_layers
        _lowerCAmelCase : str = output_hidden_size or hidden_size
        _lowerCAmelCase : Tuple = adapter_attn_dim
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        _lowerCAmelCase : str = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        _lowerCAmelCase : str = list(__a)
        _lowerCAmelCase : Union[str, Any] = list(__a)
        _lowerCAmelCase : List[str] = list(__a)
        _lowerCAmelCase : Tuple = xvector_output_dim

    @property
    def snake_case__ ( self):
        '''Total stride of the conv feature extractor (input samples per output frame).'''
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 36
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def _snake_case ( snake_case__ : Union[str, Any] ):
A = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class lowerCAmelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ):
    """Fast (CPU) tests for ``StableDiffusionLatentUpscalePipeline``.

    NOTE(review): local variable names were mangled (every assignment binds
    ``A``) while later lines read the originally intended names
    (``batch_size``, ``pipe``, ``image_slice`` ...) — confirm against upstream.
    """
    _lowerCamelCase: Any = StableDiffusionLatentUpscalePipeline
    # the upscaler does not take height/width or prompt-embedding overrides
    _lowerCamelCase: Dict = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        '''height''',
        '''width''',
        '''cross_attention_kwargs''',
        '''negative_prompt_embeds''',
        '''prompt_embeds''',
    }
    _lowerCamelCase: List[str] = PipelineTesterMixin.required_optional_params - {'''num_images_per_prompt'''}
    _lowerCamelCase: int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    _lowerCamelCase: Union[str, Any] = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    _lowerCamelCase: Optional[int] = frozenset([] )
    _lowerCamelCase: Tuple = True

    @property
    def _SCREAMING_SNAKE_CASE ( self : str ) -> str:
        # Tiny deterministic latent "image" (batch=1, 4 channels, 16x16).
        A = 1
        A = 4
        A = (16, 16)
        A = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(A_ )
        return image

    def _SCREAMING_SNAKE_CASE ( self : int ) -> str:
        # Build a minimal seeded component set (UNet / VAE / scheduler / CLIP).
        torch.manual_seed(0 )
        A = UNetaDConditionModel(
            act_fn='gelu' ,attention_head_dim=8 ,norm_num_groups=A_ ,block_out_channels=[32, 32, 64, 64] ,time_cond_proj_dim=160 ,conv_in_kernel=1 ,conv_out_kernel=1 ,cross_attention_dim=32 ,down_block_types=(
            'KDownBlock2D',
            'KCrossAttnDownBlock2D',
            'KCrossAttnDownBlock2D',
            'KCrossAttnDownBlock2D',
        ) ,in_channels=8 ,mid_block_type=A_ ,only_cross_attention=A_ ,out_channels=5 ,resnet_time_scale_shift='scale_shift' ,time_embedding_type='fourier' ,timestep_post_act='gelu' ,up_block_types=('KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KUpBlock2D') ,)
        A = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=[
            'DownEncoderBlock2D',
            'DownEncoderBlock2D',
            'DownEncoderBlock2D',
            'DownEncoderBlock2D',
        ] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,)
        # the latent upscaler predicts the sample directly, not the noise
        A = EulerDiscreteScheduler(prediction_type='sample' )
        A = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act='quick_gelu' ,projection_dim=512 ,)
        A = CLIPTextModel(A_ )
        A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        A = {
            'unet': model.eval(),
            'vae': vae.eval(),
            'scheduler': scheduler,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
        }
        return components

    def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[int] ,A_ : Union[str, Any]=0 ) -> List[Any]:
        # Deterministic call kwargs for the pipeline under test.
        if str(A_ ).startswith('mps' ):
            A = torch.manual_seed(A_ )
        else:
            A = torch.Generator(device=A_ ).manual_seed(A_ )
        A = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': self.dummy_image.cpu(),
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs

    def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
        # Smoke test: 16x16 latents upscale to a 256x256 image matching a pinned slice.
        A = 'cpu'
        A = self.get_dummy_components()
        A = self.pipeline_class(**A_ )
        pipe.to(A_ )
        pipe.set_progress_bar_config(disable=A_ )
        A = self.get_dummy_inputs(A_ )
        A = pipe(**A_ ).images
        A = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape ,(1, 256, 256, 3) )
        A = np.array(
            [0.47_22_24_12, 0.41_92_16_33, 0.44_71_74_34, 0.46_87_41_92, 0.42_58_82_58, 0.46_15_07_26, 0.4_67_75_34, 0.45_58_38_32, 0.48_57_90_55] )
        A = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(A_ ,1e-3 )

    # The overrides below only loosen the numeric tolerances of the mixin tests.
    def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )

    def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )

    def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
        super().test_inference_batch_single_identical(expected_max_diff=7e-3 )

    def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )

    def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
        super().test_save_load_local(expected_max_difference=3e-3 )

    def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
        super().test_save_load_optional_components(expected_max_difference=3e-3 )

    def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
        # Run the pipeline under every Karras scheduler (minus unsupported ones)
        # and assert all outputs have identical shapes.
        A = [
            'DDIMScheduler',
            'DDPMScheduler',
            'PNDMScheduler',
            'HeunDiscreteScheduler',
            'EulerAncestralDiscreteScheduler',
            'KDPM2DiscreteScheduler',
            'KDPM2AncestralDiscreteScheduler',
            'DPMSolverSDEScheduler',
        ]
        A = self.get_dummy_components()
        A = self.pipeline_class(**A_ )
        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=A_ )
        pipe.to(A_ )
        pipe.set_progress_bar_config(disable=A_ )
        A = self.get_dummy_inputs(A_ )
        A = 2
        A = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue
            A = getattr(A_ ,scheduler_enum.name )
            A = scheduler_cls.from_config(pipe.scheduler.config )
            A = pipe(**A_ )[0]
            outputs.append(A_ )
        assert check_same_shape(A_ )
@require_torch_gpu
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
    """Slow GPU integration tests comparing upscaler output against pinned numpy arrays.

    NOTE(review): local names are mangled (every assignment binds ``A``);
    later reads (``pipe``, ``upscaler`` ...) show the intended bindings.
    """
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
        # Release GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
        # End-to-end: SD v1-4 latents -> x2 latent upscaler -> compare mean error.
        A = torch.manual_seed(33 )
        A = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' ,torch_dtype=torch.floataa )
        pipe.to('cuda' )
        A = StableDiffusionLatentUpscalePipeline.from_pretrained(
            'stabilityai/sd-x2-latent-upscaler' ,torch_dtype=torch.floataa )
        upscaler.to('cuda' )
        A = 'a photo of an astronaut high resolution, unreal engine, ultra realistic'
        A = pipe(A_ ,generator=A_ ,output_type='latent' ).images
        A = upscaler(
            prompt=A_ ,image=A_ ,num_inference_steps=20 ,guidance_scale=0 ,generator=A_ ,output_type='np' ,).images[0]
        A = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy' )
        assert np.abs((expected_image - image).mean() ) < 5e-2

    def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
        # Upscale a downloaded 512px image and compare max error against reference.
        A = torch.manual_seed(33 )
        A = StableDiffusionLatentUpscalePipeline.from_pretrained(
            'stabilityai/sd-x2-latent-upscaler' ,torch_dtype=torch.floataa )
        upscaler.to('cuda' )
        A = 'the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'
        A = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png' )
        A = upscaler(
            prompt=A_ ,image=A_ ,num_inference_steps=20 ,guidance_scale=0 ,generator=A_ ,output_type='np' ,).images[0]
        A = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy' )
        assert np.abs((expected_image - image).max() ) < 5e-2
| 74
|
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    'The RoBERTa Model transformer with early exiting (DeeRoBERTa). ' , a , )
class UpperCAmelCase_ ( a):
    """DeeRoBERTa backbone: a DeeBERT-style early-exit model with RoBERTa embeddings."""
    lowerCamelCase__ = RobertaConfig
    lowerCamelCase__ = 'roberta'

    def __init__( self, __a):
        '''Replace the parent's BERT embeddings with RoBERTa embeddings and re-init weights.'''
        super().__init__(__a)
        _lowerCAmelCase : Optional[Any] = RobertaEmbeddings(__a)
        self.init_weights()
@add_start_docstrings(
    'RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ' , a , )
class UpperCAmelCase_ ( a):
    """DeeRoBERTa with a sequence-classification head and highway-exit handling.

    NOTE(review): assignment targets were mangled to ``_lowerCAmelCase``; the
    names read afterwards (``outputs``, ``logits``, ``loss`` ...) show the
    intended bindings — confirm against upstream before executing.
    """
    lowerCamelCase__ = RobertaConfig
    lowerCamelCase__ = 'roberta'

    def __init__( self, __a):
        '''Backbone + dropout + linear classifier over the pooled output.'''
        super().__init__(__a)
        _lowerCAmelCase : Optional[int] = config.num_labels
        _lowerCAmelCase : Optional[int] = config.num_hidden_layers
        _lowerCAmelCase : Optional[int] = DeeRobertaModel(__a)
        _lowerCAmelCase : Union[str, Any] = nn.Dropout(config.hidden_dropout_prob)
        _lowerCAmelCase : List[str] = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(__a)
    def snake_case__ ( self, __a=None, __a=None, __a=None, __a=None, __a=None, __a=None, __a=None, __a=-1, __a=False, ):
        '''Forward pass with optional early exit and per-highway loss computation.'''
        _lowerCAmelCase : Union[str, Any] = self.num_layers
        try:
            # normal full-depth forward; raises HighwayException on an early exit
            _lowerCAmelCase : List[Any] = self.roberta(
                __a, attention_mask=__a, token_type_ids=__a, position_ids=__a, head_mask=__a, inputs_embeds=__a, )
            _lowerCAmelCase : List[Any] = outputs[1]
            _lowerCAmelCase : Dict = self.dropout(__a)
            _lowerCAmelCase : Dict = self.classifier(__a)
            _lowerCAmelCase : Optional[Any] = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # an intermediate (highway) layer decided to exit early
            _lowerCAmelCase : Tuple = e.message
            _lowerCAmelCase : Union[str, Any] = e.exit_layer
            _lowerCAmelCase : List[Any] = outputs[0]
        if not self.training:
            _lowerCAmelCase : int = entropy(__a)
            _lowerCAmelCase : List[Any] = []
            _lowerCAmelCase : str = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                _lowerCAmelCase : Optional[Any] = MSELoss()
                _lowerCAmelCase : int = loss_fct(logits.view(-1), labels.view(-1))
            else:
                _lowerCAmelCase : Optional[Any] = CrossEntropyLoss()
                _lowerCAmelCase : Optional[Any] = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            # work with highway exits
            _lowerCAmelCase : Optional[int] = []
            for highway_exit in outputs[-1]:
                _lowerCAmelCase : Any = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(__a)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    _lowerCAmelCase : List[str] = MSELoss()
                    _lowerCAmelCase : List[Any] = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    _lowerCAmelCase : Dict = CrossEntropyLoss()
                    _lowerCAmelCase : Optional[Any] = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(__a)
            if train_highway:
                _lowerCAmelCase : int = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                _lowerCAmelCase : Any = (loss,) + outputs
        if not self.training:
            _lowerCAmelCase : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                _lowerCAmelCase : Optional[Any] = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
| 36
| 0
|
'''simple docstring'''
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def a_ ( __snake_case : Any ) -> Any:
    """Load a T5X checkpoint from path *__snake_case* and return its params
    as a flat ``{tuple_key: array}`` dict.

    The obfuscated original flattened the *path* instead of the loaded
    checkpoint and returned an unbound name; fixed to chain load -> flatten.
    """
    checkpoint = checkpoints.load_tax_checkpoint(__snake_case)
    flax_params = flatten_dict(checkpoint)
    return flax_params
def a_ ( __snake_case : Dict ) -> Dict:
    """Rename flattened T5X parameter keys to the HF Pix2Struct layout and
    convert the arrays to ``torch`` tensors.

    Args:
        __snake_case: flat dict mapping tuple keys (e.g. ``("target",
            "encoder_norm", "scale")``) to numpy arrays.

    Returns:
        dict mapping renamed dotted keys to ``torch`` tensors; weight
        matrices (everything except embeddings) are transposed from the
        T5X convention.
    """
    flax_dict = __snake_case
    converted_dict = {}
    CONVERSION_MAPPING = {
        'token_embedder': 'embeddings',
        'encoder_norm': 'layernorm',
        'kernel': 'weight',
        '.out': '.output',
        'scale': 'weight',
        'embedders_0.pos_embedding': 'row_embedder.weight',
        'embedders_1.pos_embedding': 'column_embedder.weight',
    }
    DECODER_CONVERSION_MAPPING = {
        'query': 'attention.query',
        'key': 'attention.key',
        'value': 'attention.value',
        'output.dense': 'output',
        'encoder_decoder_attention.o': 'encoder_decoder_attention.attention.o',
        'pre_self_attention_layer_norm': 'self_attention.layer_norm',
        'pre_cross_attention_layer_norm': 'encoder_decoder_attention.layer_norm',
        'mlp.': 'mlp.DenseReluDense.',
        'pre_mlp_layer_norm': 'mlp.layer_norm',
        'self_attention.o': 'self_attention.attention.o',
        'decoder.embeddings.embedding': 'decoder.embed_tokens.weight',
        'decoder.relpos_bias.rel_embedding': 'decoder.layer.0.self_attention.attention.relative_attention_bias.weight',
        'decoder.decoder_norm.weight': 'decoder.final_layer_norm.weight',
        'decoder.logits_dense.weight': 'decoder.lm_head.weight',
    }
    for key in flax_dict.keys():
        if "target" in key:
            # drop the leading "target" prefix and join the tuple path with dots
            new_key = '.'.join(key[1:])
            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)
            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)
            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r'layers_(\d+)', r'layer.\1', new_key)
                new_key = new_key.replace('encoder', 'encoder.encoder')
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r'layers_(\d+)', r'layer.\1', new_key)
            converted_dict[new_key] = flax_dict[key]
    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            # T5X stores weight matrices transposed relative to torch Linear
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])
    return converted_torch_dict
def a_ ( t5x_checkpoint_path , pytorch_dump_folder_path , use_large=False , is_vqa=False ):
    """Convert a T5X Pix2Struct checkpoint into a HF model + processor and save both.

    NOTE(review): the obfuscated original declared four identically named
    parameters (a SyntaxError); names here are reconstructed from how each
    value is used — confirm against upstream.
    """
    flax_params = get_flax_param(t5x_checkpoint_path)
    if not use_large:
        encoder_config = PixaStructVisionConfig()
        decoder_config = PixaStructTextConfig()
    else:
        # "large" variant hyper-parameters
        encoder_config = PixaStructVisionConfig(
            hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 )
        decoder_config = PixaStructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 )
    config = PixaStructConfig(
        vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=is_vqa )
    model = PixaStructForConditionalGeneration(config )
    state_dict = rename_and_convert_flax_params(flax_params )
    model.load_state_dict(state_dict )
    tokenizer = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' )
    image_processor = PixaStructImageProcessor()
    processor = PixaStructProcessor(image_processor=image_processor , tokenizer=tokenizer )
    if use_large:
        # NOTE(review): original assignment targets were mangled; presumably the
        # image processor's max_patches / is_vqa flags — confirm against upstream.
        image_processor.max_patches = 4096
        image_processor.is_vqa = True
    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    processor.save_pretrained(pytorch_dump_folder_path )
    print('''Model saved in {}'''.format(pytorch_dump_folder_path ) )
if __name__ == "__main__":
    # CLI entry point for the Pix2Struct conversion script.
    parser = argparse.ArgumentParser()
    parser.add_argument("""--t5x_checkpoint_path""", default=None, type=str, help="""Path to the original T5x checkpoint.""")
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--use_large""", action="""store_true""", help="""Use large model.""")
    parser.add_argument("""--is_vqa""", action="""store_true""", help="""Use large model.""")
    args = parser.parse_args()
    # Fix: original read ``args.tax_checkpoint_path`` but argparse derives the
    # dest ``t5x_checkpoint_path`` from ``--t5x_checkpoint_path``.
    convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
| 75
|
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
    """Composite configuration for a vision-encoder / text-decoder model.

    NOTE(review): assignment targets were mangled; the names read afterwards
    (``kwargs``, ``encoder_config``, ``decoder_config``) show the intended
    bindings — confirm against upstream before executing.
    """
    lowerCamelCase__ = 'vision-encoder-decoder'
    lowerCamelCase__ = True

    def __init__( self, **__a):
        '''Build the composite config from nested ``encoder``/``decoder`` dicts in kwargs.'''
        super().__init__(**__a)
        # both sub-configurations are mandatory
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuraton of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}")
        _lowerCAmelCase : str = kwargs.pop("encoder")
        _lowerCAmelCase : Any = encoder_config.pop("model_type")
        _lowerCAmelCase : str = kwargs.pop("decoder")
        _lowerCAmelCase : List[str] = decoder_config.pop("model_type")
        # instantiate the concrete sub-configs via AutoConfig dispatch
        _lowerCAmelCase : Optional[Any] = AutoConfig.for_model(__a, **__a)
        _lowerCAmelCase : Optional[Any] = AutoConfig.for_model(__a, **__a)
        _lowerCAmelCase : Optional[int] = True

    @classmethod
    def snake_case__ ( cls, __a, __a, **__a):
        '''Alternate constructor from two concrete config objects; marks the decoder for cross-attention.'''
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        _lowerCAmelCase : Optional[Any] = True
        _lowerCAmelCase : str = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **__a)

    def snake_case__ ( self):
        '''Serialize to a plain dict, expanding both sub-configs.'''
        _lowerCAmelCase : int = copy.deepcopy(self.__dict__)
        _lowerCAmelCase : List[str] = self.encoder.to_dict()
        _lowerCAmelCase : List[str] = self.decoder.to_dict()
        _lowerCAmelCase : Any = self.__class__.model_type
        return output
class UpperCAmelCase_ ( a):
    """ONNX export configuration for the vision encoder half of the composite model."""
    # minimum ONNX opset tooling version required for this export
    lowerCamelCase__ = version.parse('1.11')

    @property
    def snake_case__ ( self):
        '''Input spec: pixel_values with dynamic batch/channel/height/width axes.'''
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def snake_case__ ( self):
        '''Absolute tolerance used when validating the exported model.'''
        return 1E-4

    @property
    def snake_case__ ( self):
        '''Output spec: the encoder's last_hidden_state with dynamic batch/sequence axes.'''
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})
class UpperCAmelCase_ ( a):
    """ONNX export configuration for the text decoder half of the composite model.

    NOTE(review): assignment targets were mangled; later reads
    (``common_inputs``, ``dummy_input``) show the intended bindings.
    """
    @property
    def snake_case__ ( self):
        '''Input spec: input_ids, attention_mask and the cross-attention encoder states.'''
        _lowerCAmelCase : Optional[Any] = OrderedDict()
        _lowerCAmelCase : Any = {0: "batch", 1: "past_decoder_sequence + sequence"}
        _lowerCAmelCase : List[str] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        _lowerCAmelCase : Optional[Any] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def snake_case__ ( self, __a, __a = -1, __a = -1, __a = False, __a = None, ):
        '''Generate dummy decoder inputs, adding zeroed encoder_hidden_states of the right shape.'''
        import torch
        _lowerCAmelCase : Optional[Any] = OrderedDict()
        _lowerCAmelCase : List[str] = super().generate_dummy_inputs(
            __a, batch_size=__a, seq_length=__a, is_pair=__a, framework=__a)
        _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = dummy_input["input_ids"].shape
        _lowerCAmelCase : str = (batch, encoder_sequence, self._config.encoder_hidden_size)
        _lowerCAmelCase : List[str] = dummy_input.pop("input_ids")
        _lowerCAmelCase : List[str] = dummy_input.pop("attention_mask")
        # placeholder encoder states; real values are supplied at runtime
        _lowerCAmelCase : Optional[int] = torch.zeros(__a)
        return common_inputs
class UpperCAmelCase_ ( a):
    """Top-level ONNX config that dispatches to encoder/decoder sub-configs."""
    @property
    def snake_case__ ( self):
        '''No direct inputs: the composite model is exported as two sub-models.'''
        pass

    def snake_case__ ( self, __a):
        '''Return the ONNX config wrapping the encoder sub-model.'''
        return VisionEncoderDecoderEncoderOnnxConfig(__a)

    def snake_case__ ( self, __a, __a, __a = "default"):
        '''Return the ONNX config wrapping the decoder sub-model (reads the encoder hidden size).'''
        _lowerCAmelCase : Dict = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(__a, __a)
| 36
| 0
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def lowerCamelCase__ ( ):
    """Download and return the demo image (RGB) used to smoke-test conversion.

    The obfuscated original called ``requests.get(_a, stream=_a)`` with ``_a``
    unbound; fixed to pass the URL and ``stream=True``.
    """
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def lowerCamelCase__ ( _a):
    """Build the (source_key, target_key) rename list mapping LAVIS BLIP-2 /
    InstructBLIP weight names to the HF layout.

    Args:
        _a: model config; only ``_a.vision_config.num_hidden_layers`` is read.
            (The obfuscated original read an unbound ``config`` name.)

    Returns:
        list of ``(old, new)`` key tuples.
    """
    config = _a
    rename_keys = []
    # fmt: off
    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))
    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))
    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias"))
    # fmt: on
    return rename_keys
def lowerCamelCase__ ( _a , _b , _c):
    """Rename entry *_b* of dict *_a* to key *_c*, in place.

    The obfuscated original declared three identically named parameters (a
    SyntaxError) and read unbound ``dct``/``val``; parameter names restored.
    """
    val = _a.pop(_b)
    _a[_c] = val
def lowerCamelCase__ ( _a , _b):
    """Fold the separate q/v attention biases into a single qkv bias, in place.

    Args:
        _a: state dict to mutate (pops ``q_bias``/``v_bias``, inserts ``qkv.bias``).
        _b: model config; only ``_b.vision_config.num_hidden_layers`` is read.

    The k bias does not exist in the original checkpoint, so a zero block of
    the same shape is inserted between q and v.  (The obfuscated original had
    duplicate parameter names and read unbound ``config``/``state_dict``.)
    """
    for i in range(_b.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = _a.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = _a.pop(f"visual_encoder.blocks.{i}.attn.v_bias")
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        _a[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def lowerCamelCase__ ( _a):
    """Build the ``InstructBlipConfig`` for model name *_a* and return it with
    the vision input resolution.

    Returns:
        (config, image_size) tuple.

    The obfuscated original read an unbound ``model_name``; it is now bound
    from the single parameter.
    """
    model_name = _a
    # COCO-finetuned checkpoints use a larger 364px input resolution.
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf" , vocab_size=32001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf" , vocab_size=32001).to_dict()
    else:
        raise ValueError("Model name not supported")
    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config , text_config=text_config , qformer_config=qformer_config)
    return config, image_size
@torch.no_grad()
# NOTE(review): this function is machine-mangled. The three parameters all share
# the name `_a` (a SyntaxError), and every `SCREAMING_SNAKE_CASE : T = ...` line
# assigns to a throwaway name while later lines read the intended variables
# (qformer_tokenizer, tokenizer, hf_model, state_dict, ...). The comments below
# describe the apparent intent; confirm against the upstream InstructBLIP
# conversion script before relying on them.
def lowerCamelCase__ ( _a , _a=None , _a=False):
    # Q-Former uses a BERT tokenizer extended with a special "[DEC]" bos token.
    SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained("bert-base-uncased" , truncation_side="left")
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"})
    if "t5" in model_name:
        SCREAMING_SNAKE_CASE : Union[str, Any] = TaTokenizerFast.from_pretrained("google/flan-t5-xl" , truncation_side="left")
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        SCREAMING_SNAKE_CASE : List[str] = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b" , truncation_side="left" , bos_token="</s>" , unk_token="</s>")
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})
    SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[Any] = get_blipa_config(_a)
    SCREAMING_SNAKE_CASE : int = InstructBlipForConditionalGeneration(_a).eval()
    # maps the HF model name to the (architecture, model_type) pair used by LAVIS
    SCREAMING_SNAKE_CASE : Dict = {
        "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
        "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
        "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
        "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
    }
    SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[Any] = model_name_to_original[model_name]
    # load original model
    print("Loading original model...")
    SCREAMING_SNAKE_CASE : List[str] = "cuda:1" if torch.cuda.is_available() else "cpu"
    SCREAMING_SNAKE_CASE : List[str] = "cuda:2" if torch.cuda.is_available() else "cpu"
    SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[Any] = load_model_and_preprocess(
        name=_a , model_type=_a , is_eval=_a , device=_a)
    original_model.eval()
    print("Done!")
    # update state dict keys
    SCREAMING_SNAKE_CASE : Optional[int] = original_model.state_dict()
    SCREAMING_SNAKE_CASE : str = create_rename_keys(_a)
    for src, dest in rename_keys:
        rename_key(_a , _a , _a)
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(_a)
        if key.startswith("Qformer.bert"):
            SCREAMING_SNAKE_CASE : str = key.replace("Qformer.bert" , "qformer")
        if "attention.self" in key:
            SCREAMING_SNAKE_CASE : Any = key.replace("self" , "attention")
        if "llm_proj" in key:
            SCREAMING_SNAKE_CASE : List[str] = key.replace("llm_proj" , "language_projection")
        if "t5_proj" in key:
            SCREAMING_SNAKE_CASE : Optional[int] = key.replace("t5_proj" , "language_projection")
        if key.startswith("llm_model"):
            SCREAMING_SNAKE_CASE : Optional[int] = key.replace("llm_model" , "language_model")
        if key.startswith("t5"):
            SCREAMING_SNAKE_CASE : int = key.replace("t5" , "language")
        SCREAMING_SNAKE_CASE : int = val
    # read in qv biases
    read_in_q_v_bias(_a , _a)
    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(_a , strict=_a)
    SCREAMING_SNAKE_CASE : List[str] = load_demo_image()
    SCREAMING_SNAKE_CASE : int = "What is unusual about this image?"
    # create processor
    SCREAMING_SNAKE_CASE : Optional[int] = BlipImageProcessor(
        size={"height": image_size, "width": image_size} , image_mean=_a , image_std=_a)
    SCREAMING_SNAKE_CASE : Dict = InstructBlipProcessor(
        image_processor=_a , tokenizer=_a , qformer_tokenizer=_a , )
    SCREAMING_SNAKE_CASE : List[Any] = processor(images=_a , text=_a , return_tensors="pt").to(_a)
    # make sure processor creates exact same pixel values
    SCREAMING_SNAKE_CASE : Optional[int] = vis_processors["eval"](_a).unsqueeze(0).to(_a)
    SCREAMING_SNAKE_CASE : Dict = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device) , _a)
    original_model.to(_a)
    hf_model.to(_a)
    with torch.no_grad():
        if "vicuna" in model_name:
            SCREAMING_SNAKE_CASE : Any = original_model({"image": original_pixel_values, "text_input": [prompt]}).logits
            SCREAMING_SNAKE_CASE : List[str] = hf_model(**_a).logits
        else:
            SCREAMING_SNAKE_CASE : Any = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]}).logits
            SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer("\n" , return_tensors="pt").input_ids.to(_a)
            SCREAMING_SNAKE_CASE : List[Any] = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100)
            SCREAMING_SNAKE_CASE : List[Any] = hf_model(**_a , labels=_a).logits
    print("First values of original logits:" , original_logits[0, :3, :3])
    print("First values of HF logits:" , logits[0, :3, :3])
    # assert values
    assert original_logits.shape == logits.shape
    # looser tolerance for vicuna checkpoints
    SCREAMING_SNAKE_CASE : Optional[int] = 1E-4 if "vicuna" in model_name else 1E-5
    assert torch.allclose(original_logits.to(logits.device) , _a , atol=_a)
    print("Looks ok!")
    print("Generating with original model...")
    SCREAMING_SNAKE_CASE : List[Any] = original_model.generate({"image": original_pixel_values, "prompt": prompt} , num_beams=5)
    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model...")
    SCREAMING_SNAKE_CASE : int = hf_model.generate(
        **_a , do_sample=_a , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        SCREAMING_SNAKE_CASE : Optional[int] = 2
    print("Original generation:" , _a)
    SCREAMING_SNAKE_CASE : List[str] = processor.batch_decode(_a , skip_special_tokens=_a)
    SCREAMING_SNAKE_CASE : Optional[int] = [text.strip() for text in output_text]
    print("HF generation:" , _a)
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(_a)
        hf_model.save_pretrained(_a)
    if push_to_hub:
        processor.push_to_hub(f"Salesforce/{model_name}")
        hf_model.push_to_hub(f"Salesforce/{model_name}")
if __name__ == "__main__":
    # NOTE(review): this block looks machine-mangled — the parser and the
    # choices list are both bound to the same throwaway name `a_`, while the
    # later references use `parser`, `choices` and `args`, which are never
    # defined here. Verify the intended names before running.
    a_ = argparse.ArgumentParser()
    a_ = [
        'instructblip-vicuna-7b',
        'instructblip-vicuna-13b',
        'instructblip-flan-t5-xl',
        'instructblip-flan-t5-xxl',
    ]
    parser.add_argument(
        '--model_name',
        default='instructblip-flan-t5-xl',
        choices=choices,
        type=str,
        help='Path to hf config.json of model to convert',
    )
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
        help='Whether to push the model and processor to the hub after converting',
    )
    a_ = parser.parse_args()
    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 76
|
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class UpperCAmelCase_(a):
    """Descriptor that computes a property once per instance and caches the
    result on the instance under ``__cached_<name>``.

    The original ``__get__`` declared duplicate ``__a`` parameters (a
    SyntaxError) and called ``getattr(obj, obj, obj)``; this restores the
    intended attribute-name / default arguments.
    """

    def __get__(self, obj, objtype=None):
        # Accessed on the class itself -> return the descriptor unchanged.
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            # First access: compute and memoize on the instance.
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def A(val):
    """Convert a truthy/falsy string to 1/0 (mirrors distutils.util.strtobool).

    Raises:
        ValueError: if ``val`` is not a recognized truth value.

    Bug fixed: the original lowercased ``val`` into a throwaway variable and
    then tested the original string, so "YES"/"True" etc. were rejected.
    """
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(F"invalid truth value {val!r}" )
def A(x):
    """Return True when `x` is a framework tensor: a torch fx proxy, a
    torch/TF/JAX tensor (including JAX tracers), or a numpy ndarray."""
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True
    # fall back to numpy, which is always importable here
    return isinstance(x, np.ndarray)
def A(x):
    """Check whether `x` is a numpy ndarray."""
    return isinstance(x, np.ndarray)
def A(x):
    """Public numpy-array check.

    Bug fixed: the original delegated to ``_is_numpy``, a name that does not
    exist anywhere in this (mangled) module; the check is now inlined.
    """
    return isinstance(x, np.ndarray)
def A(x):
    """Check whether `x` is a torch.Tensor (torch must be importable)."""
    import torch

    return isinstance(x, torch.Tensor)
def A(x):
    """Safe torch-tensor check: returns False when torch is not installed.

    Bug fixed: the original delegated to ``_is_torch``, which is undefined in
    this (mangled) module; the check is inlined behind the availability guard.
    """
    if not is_torch_available():
        return False
    import torch

    return isinstance(x, torch.Tensor)
def A(x):
    """Check whether `x` is a torch.device instance."""
    import torch

    return isinstance(x, torch.device)
def A(x):
    """Safe torch-device check: returns False when torch is not installed.

    Bug fixed: the original delegated to ``_is_torch_device``, which is
    undefined in this (mangled) module; the check is inlined.
    """
    if not is_torch_available():
        return False
    import torch

    return isinstance(x, torch.device)
def A(x):
    """True iff `x` is a torch.dtype, or a string naming one (e.g. "float32").

    Bug fixed: the original called ``isinstance(x, x)`` and ``hasattr(x, x)``
    on the argument itself; the string branch now resolves the name on the
    ``torch`` module as intended.
    """
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)
def A(x):
    """Safe torch-dtype check: returns False when torch is not installed.

    Bug fixed: the original delegated to ``_is_torch_dtype``, which is
    undefined in this (mangled) module; the check is inlined.
    """
    if not is_torch_available():
        return False
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)
def A(x):
    """Check whether `x` is a tf.Tensor (tensorflow must be importable)."""
    import tensorflow as tf

    return isinstance(x, tf.Tensor)
def A(x):
    """Safe TF-tensor check: returns False when tensorflow is not installed.

    Bug fixed: the original delegated to ``_is_tensorflow``, which is
    undefined in this (mangled) module; the check is inlined behind the guard.
    """
    if not is_tf_available():
        return False
    import tensorflow as tf

    return isinstance(x, tf.Tensor)
def A(x):
    """True for TF symbolic tensors.

    Uses `tf.is_symbolic_tensor` when present (TF >= 2.14), otherwise falls
    back to an exact type comparison against `tf.Tensor`.
    """
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor
def A(x):
    """Safe TF symbolic-tensor check: False when tensorflow is not installed.

    Bug fixed: the original delegated to ``_is_tf_symbolic_tensor``, which is
    undefined in this (mangled) module; the check is inlined.
    """
    if not is_tf_available():
        return False
    import tensorflow as tf

    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor
def A(x):
    """Check whether `x` is a jax.numpy ndarray."""
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)
def A(x):
    """Safe JAX-tensor check: returns False when flax/jax is not installed.

    Bug fixed: the original delegated to ``_is_jax``, which is undefined in
    this (mangled) module; the check is inlined behind the availability guard.
    """
    if not is_flax_available():
        return False
    import jax.numpy as jnp

    return isinstance(x, jnp.ndarray)
def A(obj):
    """Recursively convert framework tensors / numpy arrays / containers to
    plain Python lists and scalars.

    Bug fixed: the original recursed by calling an undefined name and passed
    the *outer* object into each recursive call (infinite recursion); the
    recursion now descends into the container values.
    """
    if isinstance(obj, (dict, UserDict)):
        return {k: A(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [A(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj
def A(obj):
    """Recursively convert framework tensors / containers to numpy.

    Bug fixed: the original dict branch recursed via an undefined name with
    the outer object as argument; the recursion now descends into the values.
    """
    if isinstance(obj, (dict, UserDict)):
        return {k: A(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
class UpperCAmelCase_ ( a):
    # NOTE(review): this class is machine-mangled. Every `_lowerCAmelCase : T = ...`
    # assigns to a throwaway name while later lines read the intended variables
    # (class_fields, first_field, first_field_iterator, v, ...), and the `__a`
    # references inside `snake_case__` have no matching parameter. The overall
    # shape matches a dict-like dataclass output container (ordered mapping
    # whose set fields are exposed both as attributes and as items); confirm
    # against the upstream implementation before relying on any comment here.
    def snake_case__ ( self):
        '''Post-init hook: validate the dataclass fields and mirror the non-None ones as dict items.'''
        _lowerCAmelCase : Tuple = fields(self)
        # Safety and consistency checks
        if not len(__a):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")
        _lowerCAmelCase : Dict = getattr(self, class_fields[0].name)
        _lowerCAmelCase : str = all(getattr(self, field.name) is None for field in class_fields[1:])
        if other_fields_are_none and not is_tensor(__a):
            if isinstance(__a, __a):
                _lowerCAmelCase : Tuple = first_field.items()
                _lowerCAmelCase : Dict = True
            else:
                try:
                    _lowerCAmelCase : Dict = iter(__a)
                    _lowerCAmelCase : Any = True
                except TypeError:
                    _lowerCAmelCase : Any = False
            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(__a):
                    if (
                        not isinstance(__a, (list, tuple))
                        or not len(__a) == 2
                        or not isinstance(element[0], __a)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            _lowerCAmelCase : Any = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value).")
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        _lowerCAmelCase : Any = element[1]
        elif first_field is not None:
            _lowerCAmelCase : Any = first_field
        else:
            for field in class_fields:
                _lowerCAmelCase : Dict = getattr(self, field.name)
                if v is not None:
                    _lowerCAmelCase : Union[str, Any] = v
    def __delitem__( self, *__a, **__a):
        '''Deletion is forbidden on this container.'''
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")
    def snake_case__ ( self, *__a, **__a):
        '''``setdefault`` is forbidden on this container.'''
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")
    def snake_case__ ( self, *__a, **__a):
        '''``pop`` is forbidden on this container.'''
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")
    def snake_case__ ( self, *__a, **__a):
        '''``update`` is forbidden on this container.'''
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")
    def __getitem__( self, __a):
        '''String keys behave like a dict lookup; other keys index the tuple view.'''
        if isinstance(__a, __a):
            _lowerCAmelCase : Optional[int] = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]
    def __setattr__( self, __a, __a):
        '''Keep attribute writes and dict items in sync (item is set first when the key exists).'''
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(__a, __a)
        super().__setattr__(__a, __a)
    def __setitem__( self, __a, __a):
        '''Keep dict items and attributes in sync (item first, then attribute).'''
        super().__setitem__(__a, __a)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(__a, __a)
    def snake_case__ ( self):
        '''Return the values of all set keys as a plain tuple.'''
        return tuple(self[k] for k in self.keys())
class UpperCAmelCase_(a):
    """Enum mixin that raises a clearer error message for unknown values.

    Bugs fixed: the original listed the same base class twice (``(a, a)``,
    a TypeError at class creation) and referenced ``_valueamember_map_``
    instead of Enum's ``_value2member_map_``.
    """

    @classmethod
    def snake_case__(cls, __a):
        # Mirrors Enum's `_missing_` hook: list the valid values in the error.
        raise ValueError(
            f"{__a} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}")
class UpperCAmelCase_ ( a):
    # Padding strategies accepted by tokenization calls (string-valued members).
    # NOTE(review): mangling collapsed the three member names onto
    # `lowerCamelCase__`, so only the last assignment survives — the intended
    # members appear to be longest / max_length / do_not_pad; verify upstream.
    lowerCamelCase__ = 'longest'
    lowerCamelCase__ = 'max_length'
    lowerCamelCase__ = 'do_not_pad'
class UpperCAmelCase_ ( a):
    # Tensor framework identifiers (string-valued members).
    # NOTE(review): mangling collapsed the four member names onto
    # `lowerCamelCase__`, so only the last assignment survives — the intended
    # members appear to be pytorch / tensorflow / numpy / jax; verify upstream.
    lowerCamelCase__ = 'pt'
    lowerCamelCase__ = 'tf'
    lowerCamelCase__ = 'np'
    lowerCamelCase__ = 'jax'
class UpperCAmelCase_:
    """Enter a list of context managers together and exit them in reverse order
    (wrapper around ``contextlib.ExitStack``).

    Bug fixed: the original ``__init__`` assigned the managers and the stack to
    throwaway local names, so the ``self.context_managers`` / ``self.stack``
    reads in ``__enter__`` / ``__exit__`` raised AttributeError.
    """

    def __init__(self, context_managers):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        # Enter every manager; ExitStack unwinds the ones already entered if
        # a later __enter__ raises.
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
def A(model_class):
    """Return True when `model_class`'s forward/call signature declares a
    `return_loss` parameter whose default is True."""
    framework = infer_framework(model_class)
    if framework == "tf":
        sig = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        sig = inspect.signature(model_class.forward)  # PyTorch models
    else:
        sig = inspect.signature(model_class.__call__)  # Flax models
    return any(
        name == "return_loss" and param.default is True
        for name, param in sig.parameters.items()
    )
def A(model_class):
    """List the label-like argument names accepted by `model_class`'s
    forward/call signature (plus span positions for QA models)."""
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        sig = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        sig = inspect.signature(model_class.forward)  # PyTorch models
    else:
        sig = inspect.signature(model_class.__call__)  # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in sig.parameters if "label" in p or p in ("start_positions", "end_positions")]
    return [p for p in sig.parameters if "label" in p]
def A(d, parent_key="", delimiter="."):
    """Flatten a nested mapping into a single-level dict whose keys are the
    nesting paths joined by ``delimiter``.

    Bugs fixed: the original declared duplicate parameters (a SyntaxError),
    the inner generator recursed via an undefined name, and the nested-mapping
    test called ``isinstance`` with the wrong arguments.
    """

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                # non-empty sub-mapping: recurse with the extended key path
                yield from _flatten_dict(v, key, delimiter=delimiter)
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
@contextmanager
def A(working_dir, use_temp_dir=False):
    """Context manager yielding ``working_dir``, or a fresh temporary directory
    (cleaned up on exit) when ``use_temp_dir`` is True.

    Bug fixed: the original declared two parameters both named ``_lowerCamelCase``
    (a SyntaxError); distinct names restore the intended signature.
    """
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def A(array, axes=None):
    """Framework-agnostic transpose for numpy/torch/TF/JAX arrays.

    Bug fixed: the original declared duplicate parameters (a SyntaxError);
    distinct names restore the intended (array, axes) signature.
    """
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(F"Type not supported for transpose: {type(array)}." )
def A(array, newshape):
    """Framework-agnostic reshape for numpy/torch/TF/JAX arrays.

    Bug fixed: the original declared duplicate parameters (a SyntaxError);
    distinct names restore the intended (array, newshape) signature.
    """
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(F"Type not supported for reshape: {type(array)}." )
def A(array, axis=None):
    """Framework-agnostic squeeze for numpy/torch/TF/JAX arrays.

    Bug fixed: the original declared duplicate parameters (a SyntaxError);
    distinct names restore the intended (array, axis) signature.
    """
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(F"Type not supported for squeeze: {type(array)}." )
def A(array, axis):
    """Framework-agnostic expand_dims for numpy/torch/TF/JAX arrays.

    Bug fixed: the original declared duplicate parameters (a SyntaxError);
    distinct names restore the intended (array, axis) signature.
    """
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(F"Type not supported for expand_dims: {type(array)}." )
def A(array):
    """Framework-agnostic element count for numpy/torch/TF/JAX arrays.

    Bug fixed: the error message was copy-pasted from expand_dims; it now
    names this function.
    """
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(F"Type not supported for tensor_size: {type(array)}." )
def A(auto_map, repo_id):
    """Prefix each auto_map entry with ``repo_id--`` unless it already carries
    a repo prefix (or is None). Mutates and returns ``auto_map``.

    Bugs fixed: the original declared duplicate parameters (a SyntaxError) and
    discarded the rewritten values into throwaway names, so the map was
    returned unchanged.
    """
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [F"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = F"{repo_id}--{value}"
    return auto_map
def A(model_class):
    """Infer 'tf' / 'pt' / 'flax' from the first base class in `model_class`'s
    MRO; raises TypeError when the first base matches no framework."""
    for base in inspect.getmro(model_class):
        module = base.__module__
        name = base.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith(("flax", "jax")) or name == "FlaxPreTrainedModel":
            return "flax"
        else:
            # the very first non-matching base decides: bail out immediately
            raise TypeError(F"Could not infer framework from class {model_class}." )
| 36
| 0
|
"""simple docstring"""
from math import sqrt
def a_(number: int) -> bool:
    """Trial-division primality test for a non-negative int.

    Bug fixed: the original assigned the status flag to a throwaway name and
    then read/returned the undefined name ``status`` (a NameError).
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status
def a_(n: int) -> list:
    """Sieve of Eratosthenes: all primes from 2 up to ``n`` (inclusive).

    Bug fixed: the original assigned the working list to a throwaway name and
    never wrote the zeroed-out composites back (NameError on ``begin_list``).
    """
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    # actual sieve of erathostenes: zero out every multiple of a survivor
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def a_(n: int) -> list:
    """All primes in [2, n] via individual primality tests.

    Bug fixed: the original appended to the undefined name ``ans`` and relied
    on an ``is_prime`` helper that does not exist in this (mangled) module;
    a local trial-division helper makes the function self-contained.
    """
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    def _is_prime(m):
        # trial division up to sqrt(m)
        if m <= 1:
            return False
        return all(m % d for d in range(2, int(round(sqrt(m))) + 1))

    # if a number is prime then appends to list 'ans'
    ans = [number for number in range(2, n + 1) if _is_prime(number)]
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def a_(number: int) -> list:
    """Prime factorization of ``number`` in ascending order; 0 and 1 map to
    [0] and [1] respectively.

    Bug fixed: the original appended to the undefined name ``ans`` and relied
    on a missing ``is_prime`` helper; the factor loop below is self-contained
    (a divisor found by counting up from 2 is necessarily prime).
    """
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    if number == 0 or number == 1:
        ans.append(number)
    else:
        # repeatedly strip the smallest divisor (always prime) from the quotient
        factor = 2
        quotient = number
        while quotient != 1:
            if quotient % factor == 0:
                ans.append(factor)
                quotient //= factor
            else:
                factor += 1
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def a_(number: int) -> int:
    """Greatest prime factor of ``number`` (0 and 1 return themselves).

    Bug fixed: the original read the undefined names ``ans`` /
    ``prime_factorization``; a local factorization helper makes the function
    self-contained.
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    def _prime_factors(m):
        # ascending prime factorization; 0/1 map to themselves
        if m in (0, 1):
            return [m]
        fs, f, q = [], 2, m
        while q != 1:
            if q % f == 0:
                fs.append(f)
                q //= f
            else:
                f += 1
        return fs

    ans = max(_prime_factors(number))
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def a_(number: int) -> int:
    """Smallest prime factor of ``number`` (0 and 1 return themselves).

    Bug fixed: the original read the undefined names ``ans`` /
    ``prime_factorization``; a local factorization helper makes the function
    self-contained.
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    def _prime_factors(m):
        # ascending prime factorization; 0/1 map to themselves
        if m in (0, 1):
            return [m]
        fs, f, q = [], 2, m
        while q != 1:
            if q % f == 0:
                fs.append(f)
                q //= f
            else:
                f += 1
        return fs

    ans = min(_prime_factors(number))
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def a_(number: int) -> bool:
    """Return True iff ``number`` is even.

    Bug fixed: the original asserted ``isinstance(number, number)`` — calling
    isinstance with a non-type second argument raises TypeError at runtime.
    """
    assert isinstance(number, int), "'number' must been an int"
    return number % 2 == 0
def a_(number: int) -> bool:
    """Return True iff ``number`` is odd.

    Bug fixed: the original asserted ``isinstance(number, number)`` — calling
    isinstance with a non-type second argument raises TypeError at runtime.
    """
    assert isinstance(number, int), "'number' must been an int"
    return number % 2 != 0
def a_(number: int) -> list:
    """Goldbach decomposition: return two primes whose sum is ``number``
    (which must be even and > 2). Returns the pair with the smallest first
    prime found by the scan order.

    Bug fixed: the original read undefined names (``ans``, ``loop``, ``i``)
    and relied on missing ``is_prime`` / ``get_prime_numbers`` helpers; a
    local primality helper makes the function self-contained.
    """
    assert (
        isinstance(number, int) and (number > 2) and number % 2 == 0
    ), "'number' must been an int, even and > 2"

    def _is_prime(m):
        if m <= 1:
            return False
        return all(m % d for d in range(2, int(round(sqrt(m))) + 1))

    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = [p for p in range(2, number + 1) if _is_prime(p)]
    len_pn = len(prime_numbers)
    # scan all pairs (i < j) until a pair summing to 'number' is found
    i = 0
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and _is_prime(ans[0])
        and _is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def a_(number1: int, number2: int) -> int:
    """Euclidean greatest common divisor of two non-negative ints.

    Bugs fixed: the original declared duplicate ``_lowerCAmelCase`` parameters
    (a SyntaxError) and assigned the loop variables to throwaway names, so the
    Euclidean iteration never progressed.
    """
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."
    # classic Euclid: replace (a, b) by (b, a mod b) until b is zero
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"
    return number1
def a_(number1: int, number2: int) -> int:
    """Least common multiple of two positive ints.

    Bugs fixed: the original declared duplicate parameters (a SyntaxError),
    read many undefined names, and relied on a missing ``prime_factorization``
    helper. The prime-factor-merging algorithm is replaced by the equivalent
    (and exact) ``a * b // gcd(a, b)`` formulation.
    """
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    def _gcd(a, b):
        # Euclid's algorithm
        while b:
            a, b = b, a % b
        return a

    ans = number1 * number2 // _gcd(number1, number2)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def a_(n: int) -> int:
    """Return the n-th prime, counting from zero: a_(0) == 2, a_(1) == 3, ...

    Bug fixed: the original read the undefined names ``index`` / ``ans`` and
    relied on a missing ``is_prime`` helper; a local trial-division helper
    makes the function self-contained.
    """
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    def _is_prime(m):
        if m <= 1:
            return False
        return all(m % d for d in range(2, int(round(sqrt(m))) + 1))

    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then runs to the next prime number.
        while not _is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and _is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans
def a_(p_number_1: int, p_number_2: int) -> list:
    """All primes strictly between two given primes (exclusive on both ends).

    Bugs fixed: the original declared duplicate parameters (a SyntaxError),
    read undefined names, and its postcondition indexed ``ans[0]`` even when
    the two primes are adjacent (empty result) — that case is now guarded.
    """

    def _is_prime(m):
        if m <= 1:
            return False
        return all(m % d for d in range(2, int(round(sqrt(m))) + 1))

    assert (
        _is_prime(p_number_1) and _is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then fetch the next prime number.
    while not _is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not _is_prime(number):
            number += 1
    # precondition (adjacent primes legitimately yield an empty list)
    assert isinstance(ans, list) and (
        not ans or (ans[0] != p_number_1 and ans[len(ans) - 1] != p_number_2)
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def a_(n: int) -> list:
    """All positive divisors of ``n`` in ascending order (including 1 and n).

    Bug fixed: the original appended to the undefined name ``ans`` (the list
    was assigned to a throwaway variable).
    """
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    ans = [divisor for divisor in range(1, n + 1) if n % divisor == 0]
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans
def a_(number: int) -> bool:
    """True iff ``number`` equals the sum of its proper divisors (perfect number).

    Bug fixed: the original read the undefined name ``divisors`` and relied on
    a missing ``get_divisors`` helper; the divisor list is now built inline.
    """
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = [d for d in range(1, number + 1) if number % d == 0]
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def a_(numerator: int, denominator: int) -> tuple:
    """Reduce a fraction to lowest terms; returns ``(numerator, denominator)``.

    Bugs fixed: the original declared duplicate parameters (a SyntaxError),
    read the undefined name ``gcd_of_fraction``, and relied on a missing
    ``gcd`` helper — ``math.gcd`` is used instead.
    """
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    from math import gcd as _gcd

    gcd_of_fraction = _gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def a_(n: int) -> int:
    """Iterative factorial of a non-negative int (a_(0) == 1).

    Bug fixed: the original accumulated into the undefined name ``ans`` (the
    initial value was assigned to a throwaway variable).
    """
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans
def a_(n: int) -> int:
    """Fibonacci with the convention a_(0) == a_(1) == 1 (so a_(5) == 8).

    Bug fixed: the original updated undefined names (``ans`` / ``fiba`` /
    ``tmp``) because every assignment went to a throwaway variable; this
    restores the intended iterative recurrence.
    """
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    fib_prev = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        # advance the pair (fib_prev, ans) one step along the sequence
        fib_prev, ans = ans, ans + fib_prev
    return ans
| 77
|
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        # one single-shard range per job; the original parametrization used an
        # undefined placeholder (`_lowerCamelCase`) where `i` belongs
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    """_distribute_shards must split num_shards into at most max_num_jobs contiguous ranges.

    NOTE(review): renamed from the placeholder ``A`` (three tests shared that
    name and pytest would not collect it); the duplicated parameter names and
    the undefined local ``out`` are also restored.
    """
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    """_split_gen_kwargs must shard list-valued gen_kwargs into at most max_num_jobs dicts.

    NOTE(review): renamed from the placeholder ``A`` so pytest collects it;
    duplicated parameter names and the undefined local ``out`` restored.
    """
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        # two list values of different lengths is ambiguous -> must raise
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    """List values in gen_kwargs define the shard count; mismatched list lengths must raise.

    NOTE(review): renamed from the placeholder ``A`` so pytest collects it;
    ``pytest.raises`` now receives the expected exception type instead of an
    undefined placeholder, and the local ``out`` is restored.
    """
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
| 36
| 0
|
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
# Emit INFO-level progress messages during conversion; module-level logger for this script.
logging.set_verbosity_info()
snake_case_ = logging.get_logger()
@dataclass
class Tracker:
    """Run an input through ``module`` and record every executed leaf layer
    (a module with no submodules, or a Conv2d/BatchNorm2d) via forward hooks.

    NOTE(review): reconstructed from an obfuscated block — the class was
    declared under a placeholder name but is used below as
    ``Tracker(self.dest)(x).parametrized``, the dataclass fields were replaced
    by ``42`` placeholders with an undefined ``default_factory``, and the hook
    method duplicated its parameter names (a SyntaxError).
    """

    module: nn.Module
    traced: list = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # A module counts as a leaf when it has no children, or when it is a
        # conv/batch-norm layer whose parameters we want to track anyway.
        has_not_submodules = (
            len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        )
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        # Hook every submodule, run the probe input through, then detach hooks.
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    """Copy weights layer-by-layer from ``src`` into ``dest`` by tracing both
    models with :class:`Tracker` on the same probe input and zipping their
    parametrized operations together.

    NOTE(review): reconstructed from an obfuscated block — the class was
    declared under a placeholder name but is instantiated below as
    ``ModuleTransfer(src=..., dest=..., raise_if_mismatch=...)``, and the
    dataclass fields were replaced by ``42`` placeholders.
    """

    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: list = field(default_factory=list)
    dest_skip: list = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        """Transfer weights using ``x`` as the probe input; unless
        ``raise_if_mismatch`` is False, both models must yield the same number
        of (non-skipped) parametrized operations."""
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        # drop layer types the caller asked to ignore on either side
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"""Numbers of operations are different. Source module has {len(src_traced)} operations while"""
                f""" destination module has {len(dest_traced)}."""
            )
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"""Transfered from={src_m} to={dest_m}""")
class FakeRegNetVisslWrapper(nn.Module):
    """Expose a classy-vision RegNet trunk through vissl's
    ``get_trunk_forward_outputs`` interface so it can be traced like the
    original SEER checkpoints.

    NOTE(review): reconstructed from an obfuscated block — the class was
    declared under a placeholder name but is used below as
    ``FakeRegNetVisslWrapper(RegNetYaagf())``; ``forward`` had duplicated
    placeholder arguments where ``x`` and ``out_feat_keys=None`` belong
    (``out_feat_keys=None`` is an assumption — TODO confirm against vissl).
    """

    def __init__(self, model: nn.Module):
        super().__init__()
        feature_blocks: list = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"""Unexpected layer name {k}"""
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"""res{block_index}""", v))
        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )
class NameToFromModelFuncMap(dict):
    """Maps an HF model name to a zero-arg factory returning
    ``(source_model, state_dict_or_None)``. Names not registered explicitly
    fall back to timm, translating e.g. ``regnet-x-002`` -> ``regnetx_002``.

    NOTE(review): reconstructed from an obfuscated block — the base class was
    an undefined placeholder (``dict`` inferred from ``x not in self`` /
    ``super().__getitem__``) and ``pretrained=True`` replaces a garbled
    placeholder argument (assumption — TODO confirm against the original
    conversion script).
    """

    def convert_name_to_timm(self, x: str) -> str:
        # "regnet-x-002" -> "regnetx_002"
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str):
        # default to timm!
        if x not in self:
            timm_name = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(timm_name, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)
        return val
class NameToOurModelFuncMap(dict):
    """Maps a model name to the 🤗 Transformers class used to build it:
    self-supervised SEER checkpoints (no ImageNet head, i.e. "seer" without
    "in1k") -> RegNetModel; everything else -> RegNetForImageClassification.

    NOTE(review): reconstructed from an obfuscated block — the base class was
    an undefined placeholder; ``dict`` is inferred from the usage
    ``NameToOurModelFuncMap()`` below and the parallel map above.
    """

    def __getitem__(self, x: str):
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys):
    """Copy the head tensors listed in ``keys`` (as ``(from_key, to_key)``
    pairs) from ``from_state_dict`` into ``to_state_dict`` (cloned), and
    return the updated ``to_state_dict``.

    NOTE(review): the original def used one placeholder name for all three
    parameters (a SyntaxError) and dropped the assignment target for the
    cloned tensor; name and behavior reconstructed from the call site
    (``manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)``).
    """
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"""Copied key={from_key} to={to_key}""")
    return to_state_dict
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = True , ):
    """Convert one checkpoint: trace the source model, transfer its weights
    into a freshly built HF RegNet, verify the two models produce matching
    outputs, and optionally push model + image processor to the Hub.

    NOTE(review): this block came through an obfuscation pass — all six
    parameters share one placeholder name (a SyntaxError) and local
    assignments were collapsed onto ``UpperCAmelCase`` while later lines
    reference the real names (``name``, ``our_model``, ``from_state_dict``,
    ``module_transfer`` ...). Judging by the body the real signature is
    ``(name, from_model_func, our_model_func, config, save_directory,
    push_to_hub=True)`` — TODO de-obfuscate before this can run. Left
    byte-identical here; comments only.
    """
    print(F"""Converting {name}...""" )
    # trace both models on a fixed dummy input and copy weights layer-by-layer
    with torch.no_grad():
        UpperCAmelCase , UpperCAmelCase = from_model_func()
        UpperCAmelCase = our_model_func(lowercase_ ).eval()
        UpperCAmelCase = ModuleTransfer(src=lowercase_ , dest=lowercase_ , raise_if_mismatch=lowercase_ )
        UpperCAmelCase = torch.randn((1, 3, 224, 224) )
        module_transfer(lowercase_ )
    if from_state_dict is not None:
        UpperCAmelCase = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            UpperCAmelCase = [('0.clf.0.weight', 'classifier.1.weight'), ('0.clf.0.bias', 'classifier.1.bias')]
        UpperCAmelCase = manually_copy_vissl_head(lowercase_ , our_model.state_dict() , lowercase_ )
        our_model.load_state_dict(lowercase_ )
    # compare HF output (logits or last hidden state) with the source model's
    UpperCAmelCase = our_model(lowercase_ , output_hidden_states=lowercase_ )
    UpperCAmelCase = (
        our_outputs.logits if isinstance(lowercase_ , lowercase_ ) else our_outputs.last_hidden_state
    )
    UpperCAmelCase = from_model(lowercase_ )
    UpperCAmelCase = from_output[-1] if type(lowercase_ ) is list else from_output
    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        UpperCAmelCase = our_outputs.hidden_states[-1]
    assert torch.allclose(lowercase_ , lowercase_ ), "The model logits don't match the original one."
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name , commit_message='Add model' , use_temp_dir=lowercase_ , )
        # SEER checkpoints were trained at 384px, the rest at 224px
        UpperCAmelCase = 224 if 'seer' not in name else 384
        # we can use the convnext one
        UpperCAmelCase = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' , size=lowercase_ )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name , commit_message='Add image processor' , use_temp_dir=lowercase_ , )
        print(F"""Pushed {name}""" )
def _lowerCAmelCase ( lowercase_ , lowercase_ = None , lowercase_ = True ):
    """Build the per-architecture RegNet configs (x/y variants + SEER),
    register the source-model factories (timm for supervised checkpoints,
    classy-vision/vissl downloads for SEER), then convert either the single
    ``model_name`` requested or every known architecture.

    NOTE(review): obfuscated — judging by the body the real signature is
    ``(save_directory, model_name=None, push_to_hub=True)`` and every
    ``UpperCAmelCase = ...`` assignment originally bound a real name
    (``num_labels``, ``idalabel``, ``names_to_config``,
    ``names_to_ours_model_map`` ...). The ``RegNetParams(... w_a=1744 ,
    w_a=6_2_0.8_3 ...)`` calls repeat a keyword argument (a SyntaxError;
    presumably ``w_0`` and ``w_a`` originally — TODO confirm). Left
    byte-identical; comments only.
    """
    # label mapping for the ImageNet-1k head, fetched from the hub
    UpperCAmelCase = 'imagenet-1k-id2label.json'
    UpperCAmelCase = 1000
    UpperCAmelCase = (1, num_labels)
    UpperCAmelCase = 'huggingface/label-files'
    UpperCAmelCase = num_labels
    UpperCAmelCase = json.load(open(cached_download(hf_hub_url(lowercase_ , lowercase_ , repo_type='dataset' ) ) , 'r' ) )
    UpperCAmelCase = {int(lowercase_ ): v for k, v in idalabel.items()}
    UpperCAmelCase = idalabel
    UpperCAmelCase = {v: k for k, v in idalabel.items()}
    # config factory pre-loaded with the label maps
    UpperCAmelCase = partial(lowercase_ , num_labels=lowercase_ , idalabel=lowercase_ , labelaid=lowercase_ )
    # one config per supported architecture, keyed by hub model name
    UpperCAmelCase = {
        'regnet-x-002': ImageNetPreTrainedConfig(
            depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type='x' ),
        'regnet-x-004': ImageNetPreTrainedConfig(
            depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type='x' ),
        'regnet-x-006': ImageNetPreTrainedConfig(
            depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type='x' ),
        'regnet-x-008': ImageNetPreTrainedConfig(
            depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type='x' ),
        'regnet-x-016': ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type='x' ),
        'regnet-x-032': ImageNetPreTrainedConfig(
            depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type='x' ),
        'regnet-x-040': ImageNetPreTrainedConfig(
            depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type='x' ),
        'regnet-x-064': ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type='x' ),
        'regnet-x-080': ImageNetPreTrainedConfig(
            depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type='x' ),
        'regnet-x-120': ImageNetPreTrainedConfig(
            depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type='x' ),
        'regnet-x-160': ImageNetPreTrainedConfig(
            depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type='x' ),
        'regnet-x-320': ImageNetPreTrainedConfig(
            depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type='x' ),
        # y variant
        'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
        'regnet-y-004': ImageNetPreTrainedConfig(
            depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
        'regnet-y-006': ImageNetPreTrainedConfig(
            depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
        'regnet-y-008': ImageNetPreTrainedConfig(
            depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
        'regnet-y-016': ImageNetPreTrainedConfig(
            depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
        'regnet-y-032': ImageNetPreTrainedConfig(
            depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
        'regnet-y-040': ImageNetPreTrainedConfig(
            depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
        'regnet-y-064': ImageNetPreTrainedConfig(
            depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
        'regnet-y-080': ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
        'regnet-y-120': ImageNetPreTrainedConfig(
            depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
        'regnet-y-160': ImageNetPreTrainedConfig(
            depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
        'regnet-y-320': ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
        # models created by SEER -> https://arxiv.org/abs/2202.08360
        'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
        'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
        'regnet-y-1280-seer': RegNetConfig(
            depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
        'regnet-y-2560-seer': RegNetConfig(
            depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
        'regnet-y-10b-seer': ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
        # finetuned on imagenet
        'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
        'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
        'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
        'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig(
            depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
        'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
    }
    UpperCAmelCase = NameToOurModelFuncMap()
    UpperCAmelCase = NameToFromModelFuncMap()
    # add seer weights logic
    def load_using_classy_vision(lowercase_ , lowercase_ ) -> Tuple[nn.Module, Dict]:
        """Download a classy-vision checkpoint and load its trunk into ``model_func()``;
        returns the eval-mode model plus the (possibly empty) head state dict."""
        UpperCAmelCase = torch.hub.load_state_dict_from_url(lowercase_ , model_dir=str(lowercase_ ) , map_location='cpu' )
        UpperCAmelCase = model_func()
        # check if we have a head, if yes add it
        UpperCAmelCase = files['classy_state_dict']['base_model']['model']
        UpperCAmelCase = model_state_dict['trunk']
        model.load_state_dict(lowercase_ )
        return model.eval(), model_state_dict["heads"]
    # pretrained
    UpperCAmelCase = partial(
        lowercase_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
    UpperCAmelCase = partial(
        lowercase_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
    UpperCAmelCase = partial(
        lowercase_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
    UpperCAmelCase = partial(
        lowercase_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch' , lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27 , group_width=1010 , w_a=1744 , w_a=6_2_0.8_3 , w_m=2.5_2 ) ) ) , )
    # IN1K finetuned
    UpperCAmelCase = partial(
        lowercase_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
    UpperCAmelCase = partial(
        lowercase_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
    UpperCAmelCase = partial(
        lowercase_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
    UpperCAmelCase = partial(
        lowercase_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch' , lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27 , group_width=1010 , w_a=1744 , w_a=6_2_0.8_3 , w_m=2.5_2 ) ) ) , )
    # convert a single requested architecture, or all of them
    if model_name:
        convert_weight_and_push(
            lowercase_ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , lowercase_ , lowercase_ , )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                lowercase_ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , lowercase_ , lowercase_ , lowercase_ , )
    return config, expected_shape
if __name__ == "__main__":
    # CLI entry point: pick a single architecture (or all of them) and convert/push.
    # NOTE(review): obfuscation left `snake_case_` as the assignment target while
    # the code below references `parser` and `args` — undefined as written.
    snake_case_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default=None,
        type=str,
        help=(
            """The name of the model you wish to convert, it must be one of the supported regnet* architecture,"""
            """ currently: regnetx-*, regnety-*. If `None`, all of them will the converted."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default=None,
        type=Path,
        required=True,
        help="""Path to the output PyTorch model directory.""",
    )
    parser.add_argument(
        """--push_to_hub""",
        default=True,
        type=bool,
        required=False,
        help="""If True, push model and image processor to the hub.""",
    )
    snake_case_ = parser.parse_args()
    snake_case_ = args.pytorch_dump_folder_path
    # ensure the dump directory exists before conversion starts
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 78
|
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """CLIP pre-processing that keeps the autograd graph intact: images are
    resized/cropped/normalized with differentiable torchvision transforms
    (instead of the non-differentiable HF feature extractor) while only the
    text goes through the tokenizer.

    NOTE(review): reconstructed from an obfuscated block — the class was
    declared under a placeholder name but is instantiated below as
    ``ProcessorGradientFlow(device=...)``, the two ``__init__`` parameters
    shared one placeholder name (a SyntaxError), and every ``self.*``
    assignment had been collapsed onto a local placeholder. The key the
    preprocessed images are stored under ("pixel_values") is an assumption
    from the CLIP processor convention — TODO confirm.
    """

    def __init__(self, device="cpu", clip_model="openai/clip-vit-large-patch14"):
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        # CLIP's published normalization statistics
        self.image_mean = [0.48_145_466, 0.4_578_275, 0.40_821_073]
        self.image_std = [0.26_862_954, 0.26_130_258, 0.27_577_711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        """Resize -> center-crop -> normalize, all differentiable."""
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        """Tokenize ``text``, preprocess ``images``, and move everything to
        the configured device."""
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class UpperCAmelCase_ ( nn.Module):
    """Latent-space image editor: optimizes an additive vector on a VQGAN
    latent with Adam so that the decoded image maximizes CLIP similarity to
    positive prompts and minimizes it for negative prompts.

    NOTE(review): this class came through an obfuscation pass — parameter
    lists repeat the placeholder ``__a`` (a SyntaxError) and local/attribute
    assignments were collapsed onto ``_lowerCAmelCase`` while later lines
    reference the real names (``self.vqgan``, ``iterations`` ...). Code left
    byte-identical; it cannot run until those names are restored.
    """
    def __init__( self, __a=10, __a=0.01, __a=None, __a=None, __a=None, __a=None, __a=None, __a=None, __a=False, __a=True, __a="image", __a=True, __a=False, __a=False, __a=False, ):
        '''Load (or accept pre-built) VQGAN and CLIP models, move CLIP to the
        target device, build the gradient-preserving CLIP processor, and store
        the optimization hyper-parameters (iterations, lr, log/make_grid/
        return_val/quantize flags) plus the VQGAN decoder's latent shape.
        Real parameter names are lost to obfuscation — TODO confirm against
        the original vqgan-clip research project.'''
        super().__init__()
        _lowerCAmelCase : List[str] = None
        _lowerCAmelCase : List[str] = device if device else get_device()
        if vqgan:
            _lowerCAmelCase : Union[str, Any] = vqgan
        else:
            _lowerCAmelCase : Optional[Any] = load_vqgan(self.device, conf_path=__a, ckpt_path=__a)
        self.vqgan.eval()
        if clip:
            _lowerCAmelCase : str = clip
        else:
            _lowerCAmelCase : int = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        _lowerCAmelCase : Optional[int] = ProcessorGradientFlow(device=self.device)
        _lowerCAmelCase : Any = iterations
        _lowerCAmelCase : List[Any] = lr
        _lowerCAmelCase : Tuple = log
        _lowerCAmelCase : List[str] = make_grid
        _lowerCAmelCase : int = return_val
        _lowerCAmelCase : Dict = quantize
        _lowerCAmelCase : Any = self.vqgan.decoder.z_shape
    def snake_case__ ( self, __a=None, __a=None, __a=5, __a=True):
        '''Assemble the intermediate PNG frames found in the input directory
        (default: ``self.save_path``) into an animated GIF, spreading
        ``total_duration`` seconds across the frames; ``extend_frames``
        presumably lengthens the first/last frame — TODO confirm intent.'''
        _lowerCAmelCase : Union[str, Any] = []
        if output_path is None:
            _lowerCAmelCase : List[Any] = "./animation.gif"
        if input_path is None:
            _lowerCAmelCase : str = self.save_path
        _lowerCAmelCase : str = sorted(glob(input_path + "/*"))
        if not len(__a):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)")
        if len(__a) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        _lowerCAmelCase : Optional[int] = total_duration / len(__a)
        _lowerCAmelCase : Union[str, Any] = [frame_duration] * len(__a)
        if extend_frames:
            # hold the first frame longer and the last frame longest
            _lowerCAmelCase : Any = 1.5
            _lowerCAmelCase : List[str] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(__a))
        imageio.mimsave(__a, __a, duration=__a)
        print(f"gif saved to {output_path}")
    def snake_case__ ( self, __a=None, __a=None):
        '''Encode the image at ``path`` into a VQGAN latent ``z``; passing a
        tensor directly is not implemented.'''
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        _lowerCAmelCase : Dict = preprocess(Image.open(__a), target_image_size=256).to(self.device)
        _lowerCAmelCase : Dict = preprocess_vqgan(__a)
        _lowerCAmelCase , *_lowerCAmelCase : str = self.vqgan.encode(__a)
        return z
    def snake_case__ ( self, __a):
        '''Add the transform vector to (a detached, grad-enabled copy of) the
        current latent, optionally re-quantize, and decode to an image.'''
        _lowerCAmelCase : Optional[Any] = self.latent.detach().requires_grad_()
        _lowerCAmelCase : Dict = base_latent + transform_vector
        if self.quantize:
            _lowerCAmelCase , *_lowerCAmelCase : List[Any] = self.vqgan.quantize(__a)
        else:
            _lowerCAmelCase : Any = trans_latent
        return self.vqgan.decode(__a)
    def snake_case__ ( self, __a, __a, __a=None):
        '''Score image/text similarity with CLIP and return the (optionally
        weighted) sum of the per-prompt logits.'''
        _lowerCAmelCase : int = self.clip_preprocessor(text=__a, images=__a, return_tensors="pt", padding=__a)
        _lowerCAmelCase : Optional[int] = self.clip(**__a)
        _lowerCAmelCase : Any = clip_outputs.logits_per_image
        if weights is not None:
            _lowerCAmelCase : Tuple = similarity_logits * weights
        return similarity_logits.sum()
    def snake_case__ ( self, __a, __a, __a):
        '''Loss = -log(similarity to positive prompts) + log(similarity to
        negative prompts); negatives default to a constant 1 when absent.'''
        _lowerCAmelCase : List[Any] = self._get_clip_similarity(pos_prompts["prompts"], __a, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            _lowerCAmelCase : List[Any] = self._get_clip_similarity(neg_prompts["prompts"], __a, weights=neg_prompts["weights"])
        else:
            _lowerCAmelCase : Union[str, Any] = torch.tensor([1], device=self.device)
        _lowerCAmelCase : List[str] = -torch.log(__a) + torch.log(__a)
        return loss
    def snake_case__ ( self, __a, __a, __a):
        '''Generator: run Adam for ``self.iterations`` steps on a random
        latent-offset vector, yielding the decoded PIL image (or the raw
        vector, per ``self.return_val``) after each step.'''
        _lowerCAmelCase : Optional[Any] = torch.randn_like(self.latent, requires_grad=__a, device=self.device)
        _lowerCAmelCase : Optional[int] = torch.optim.Adam([vector], lr=self.lr)
        for i in range(self.iterations):
            optim.zero_grad()
            _lowerCAmelCase : Any = self._add_vector(__a)
            _lowerCAmelCase : Optional[Any] = loop_post_process(__a)
            _lowerCAmelCase : Optional[Any] = self._get_CLIP_loss(__a, __a, __a)
            print("CLIP loss", __a)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=__a)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector
    def snake_case__ ( self, __a, __a, __a):
        '''Start a wandb run and record the prompts, hyper-parameters, and
        (when given) the resized original image.'''
        wandb.init(reinit=__a, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            _lowerCAmelCase : str = Image.open(__a)
            _lowerCAmelCase : int = image.resize((256, 256))
            wandb.log("Original Image", wandb.Image(__a))
    def snake_case__ ( self, __a):
        '''Normalize prompts into {"prompts": [...], "weights": tensor}.
        Accepts a "|"-separated string, (text, weight) pairs, "text:weight"
        strings, or bare strings (weight defaults to 1.0).'''
        if not prompts:
            return []
        _lowerCAmelCase : int = []
        _lowerCAmelCase : List[str] = []
        if isinstance(__a, __a):
            _lowerCAmelCase : Union[str, Any] = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(__a, (tuple, list)):
                _lowerCAmelCase : Optional[Any] = prompt[0]
                _lowerCAmelCase : Union[str, Any] = float(prompt[1])
            elif ":" in prompt:
                _lowerCAmelCase , _lowerCAmelCase : int = prompt.split(":")
                _lowerCAmelCase : Optional[Any] = float(__a)
            else:
                _lowerCAmelCase : Optional[int] = prompt
                _lowerCAmelCase : List[Any] = 1.0
            processed_prompts.append(__a)
            weights.append(__a)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(__a, device=self.device),
        }
    def snake_case__ ( self, __a, __a=None, __a=None, __a=True, __a=False, __a=True, __a=True, __a=None, ):
        '''Main entry point: seed the latent from ``image_path`` (or random
        noise), parse the prompts, then iterate the CLIP optimization loop,
        showing/saving intermediate and final images as requested.'''
        if image_path:
            _lowerCAmelCase : List[Any] = self._get_latent(__a)
        else:
            _lowerCAmelCase : Any = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(__a, __a, __a)
        assert pos_prompts, "You must provide at least one positive prompt."
        _lowerCAmelCase : int = self.process_prompts(__a)
        _lowerCAmelCase : List[str] = self.process_prompts(__a)
        if save_final and save_path is None:
            # derive an output directory from the positive prompts
            _lowerCAmelCase : int = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(__a):
                os.makedirs(__a)
            else:
                # directory exists: disambiguate with a timestamp suffix
                _lowerCAmelCase : Tuple = save_path + "_" + get_timestamp()
                os.makedirs(__a)
            _lowerCAmelCase : Tuple = save_path
        _lowerCAmelCase : List[Any] = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(__a))
        _lowerCAmelCase : int = loop_post_process(__a)
        for iter, transformed_img in enumerate(self._optimize_CLIP(__a, __a, __a)):
            if show_intermediate:
                show_pil(__a)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(__a)})
        if show_final:
            show_pil(__a)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
| 36
| 0
|
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
lowerCamelCase_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    """Depth estimation pipeline: predicts a per-pixel depth map for an input
    image, returning both the raw predicted-depth tensor and a uint8 PIL
    rendering of it.

    NOTE(review): reconstructed from an obfuscated block — the base class and
    decorator argument were an undefined placeholder (restored from the
    ``from .base import PIPELINE_INIT_ARGS, Pipeline`` import above), all four
    hook methods shared one placeholder name (so later defs shadowed earlier
    ones), and ``self.image_size`` was assigned to a local. Method names
    follow the standard ``Pipeline`` contract the base class invokes
    (_sanitize_parameters / preprocess / _forward / postprocess).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        """Predict the depth map(s) for ``images`` (URL, local path, or PIL image)."""
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        # No pipeline-specific parameters: empty preprocess/forward/postprocess kwargs.
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        # remember the original (width, height) so postprocess can resize back
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        # PIL size is (width, height); interpolate expects (height, width)
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        # rescale to 0..255 for a viewable grayscale depth image
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
| 79
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
_snake_case = get_tests_dir("fixtures")
class ImageProcessorUtilTests(unittest.TestCase):
    """Offline/remote-loading behaviors of image processors.

    NOTE(review): reconstructed from an obfuscated block — the three test
    methods shared one placeholder name (so only the last survived class
    creation and none matched unittest's ``test_*`` convention), and several
    arguments were replaced by an undefined ``__a`` (restored as the mock
    response and ``OSError``, inferred from the surrounding code and the
    standard transformers test).
    """

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock HTTP response emulating the server being unreachable
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # Loading directly from a config URL is deprecated but must still work.
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")
        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )
        self.assertIsNotNone(config)
@is_staging_test
class UpperCAmelCase_ ( unittest.TestCase):
@classmethod
def snake_case__ ( cls):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = TOKEN
HfFolder.save_token(__a)
@classmethod
def snake_case__ ( cls):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id="test-image-processor")
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
except HTTPError:
pass
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained(__a)
image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)
_lowerCAmelCase : str = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
for k, v in image_processor.__dict__.items():
self.assertEqual(__a, getattr(__a, __a))
# Reset repo
delete_repo(token=self._token, repo_id="test-image-processor")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__a, repo_id="test-image-processor", push_to_hub=__a, use_auth_token=self._token)
_lowerCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
for k, v in image_processor.__dict__.items():
self.assertEqual(__a, getattr(__a, __a))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = ViTImageProcessor.from_pretrained(__a)
image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)
_lowerCAmelCase : Tuple = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
for k, v in image_processor.__dict__.items():
self.assertEqual(__a, getattr(__a, __a))
# Reset repo
delete_repo(token=self._token, repo_id="valid_org/test-image-processor")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__a, repo_id="valid_org/test-image-processor-org", push_to_hub=__a, use_auth_token=self._token)
_lowerCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
for k, v in image_processor.__dict__.items():
self.assertEqual(__a, getattr(__a, __a))
def snake_case__ ( self):
    """Push a custom (dynamic-module) image processor and load it back via AutoImageProcessor.

    Verifies that ``register_for_auto_class`` writes the ``auto_map`` entry and
    that remote-code loading resolves the custom class by name.

    NOTE(review): ``__a`` is undefined in this scope — presumably a fixture
    path and a ``trust_remote_code=True`` flag before automated renaming.
    """
    CustomImageProcessor.register_for_auto_class()
    _lowerCAmelCase : List[str] = CustomImageProcessor.from_pretrained(__a)
    image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)
    # This has added the proper auto_map field to the config
    self.assertDictEqual(
        image_processor.auto_map, {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"}, )
    _lowerCAmelCase : Tuple = AutoImageProcessor.from_pretrained(
        f"{USER}/test-dynamic-image-processor", trust_remote_code=__a)
    # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
    self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
| 36
| 0
|
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")


class lowercase_(Generic[T]):
    """A bounded least-recently-used cache over keys of type ``T``.

    ``dq_store`` keeps keys in most-recently-used-first order (leftmost is the
    most recent); ``key_reference`` mirrors its contents as a set so membership
    tests are O(1).
    """

    def __init__(self, a):
        """Create a cache holding at most ``a`` keys.

        A falsy capacity (``0``/``None``) means effectively unbounded
        (``sys.maxsize``); a negative capacity raises ``ValueError``.
        """
        self.dq_store = deque()  # MRU order, leftmost = most recently used
        self.key_reference = set()  # fast membership mirror of dq_store
        if not a:
            self._MAX_CAPACITY = sys.maxsize
        elif a < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            self._MAX_CAPACITY = a

    def refer(self, a):
        """Record a use of key ``a``, evicting the least recently used key on overflow."""
        if a not in self.key_reference:
            if len(self.dq_store) == self._MAX_CAPACITY:
                # Rightmost entry is the least recently used one.
                evicted = self.dq_store.pop()
                self.key_reference.remove(evicted)
        else:
            # Key already cached: move it to the front.
            self.dq_store.remove(a)
        self.dq_store.appendleft(a)
        self.key_reference.add(a)

    def display(self):
        """Print the cached keys, most recently used first."""
        for k in self.dq_store:
            print(k)

    def __repr__(self):
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # The cache class is named ``lowercase_`` in this module; bind the demo
    # instance to ``lru_cache``, the name the calls below actually use.
    lru_cache: lowercase_[str | int] = lowercase_(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()
    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 80
|
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase_ :
    """Builds tiny LiLT configs and random inputs, and shape-checks each model head.

    Local/parameter names are reconstructed from the assignment order and the
    call sites in the sibling test class (automated renaming had collapsed them
    into duplicate ``__a``/``snake_case__`` identifiers, which is a SyntaxError
    and shadows every method but the last).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        """Return a config plus random ids/bboxes/masks/labels for one forward pass."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal: each box must satisfy x1 <= x2 and y1 <= y2.
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        """Build a LiltConfig sized for fast tests."""
        return LiltConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )

    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        """Run the bare model with progressively fewer inputs and check output shapes."""
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        """Check the token-classification head's logits shape."""
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        """Check the QA head's start/end logits shapes."""
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs() to the (config, inputs_dict) shape the common mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


# Backward-compatible alias: the test class below instantiates ``LiltModelTester``.
LiltModelTester = UpperCAmelCase_
@require_torch
class UpperCAmelCase_ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase):
    """Common-suite tests for LiLT models.

    Method and attribute names are reconstructed: automated renaming had given
    every method the same name (only the last survived) and duplicated
    parameter names (a SyntaxError). Mixin bases come from this file's imports;
    the two boolean flags follow the common-mixin convention — TODO confirm
    against the original test file.
    """

    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': LiltModel,
            'question-answering': LiltForQuestionAnswering,
            'text-classification': LiltForSequenceClassification,
            'token-classification': LiltForTokenClassification,
            'zero-shot': LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # All pipeline tests are skipped for this model.
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            # NOTE(review): the renamer discarded this assignment's target; the
            # conventional intent is to vary the config's embedding type.
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class UpperCAmelCase_ ( unittest.TestCase):
    """Slow integration test: run the pretrained base LiLT model on a tiny input."""

    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)
        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)
        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device, )
        # Fixed: the original used assertTrue(shape, expected) which never
        # compares its two arguments (the second is just the failure message).
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 36
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
# Lazy-import bootstrap for the Speech2Text model family: declare the public
# symbols per submodule, adding optional-dependency groups only when their
# backend is available, then install a _LazyModule so nothing heavy is
# imported until first attribute access.
_import_structure = {
    """configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
    """processing_speech_to_text""": ["""Speech2TextProcessor"""],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""tokenization_speech_to_text"""] = ["""Speech2TextTokenizer"""]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""feature_extraction_speech_to_text"""] = ["""Speech2TextFeatureExtractor"""]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_tf_speech_to_text"""] = [
        """TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFSpeech2TextForConditionalGeneration""",
        """TFSpeech2TextModel""",
        """TFSpeech2TextPreTrainedModel""",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_speech_to_text"""] = [
        """SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """Speech2TextForConditionalGeneration""",
        """Speech2TextModel""",
        """Speech2TextPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; names mirror the string lists
    # above (the obfuscated "SpeechaText*" spellings did not exist anywhere).
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
    import sys

    # Replace this module object with a lazy proxy (the original discarded the
    # _LazyModule into a throwaway variable, leaving the module eager & empty).
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 81
|
import argparse
import copy
def A ( _lowerCamelCase ):
    """Parse a whitespace-separated edge list file into an adjacency mapping.

    Each line of the file reads ``u v weight``; the result maps every node to a
    list of ``[neighbour, weight]`` pairs (both directions, weights kept as
    strings). Fixes the original's NameError: the fresh list was bound to one
    renamed variable but appended through the undefined ``_list``.
    """
    dict_of_neighbours = {}
    with open(_lowerCamelCase) as handle:
        for raw_line in handle:
            parts = raw_line.split()
            u, v, w = parts[0], parts[1], parts[2]
            # Record the edge in both directions.
            dict_of_neighbours.setdefault(u, []).append([v, w])
            dict_of_neighbours.setdefault(v, []).append([u, w])
    return dict_of_neighbours


generate_neighbours = A  # name used by the driver code in this module
def A ( path , dict_of_neighbours ):
    """Build a greedy nearest-neighbour first tour for tabu search.

    ``path`` is the edge-list file (its first character is taken as the start
    node); ``dict_of_neighbours`` maps node -> [[neighbour, weight], ...].
    Returns ``(tour, total_distance)`` where the tour starts and ends at the
    start node. Fixes the original's duplicate-parameter SyntaxError and the
    locals collapsed by automated renaming.
    """
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        # Pick the cheapest unvisited neighbour; 10_000 acts as "infinity".
        minim = 10_000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node
    # Close the tour back at the start node.
    first_solution.append(end_node)
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    # Replace the dummy "infinity" cost of the last greedy step with the real
    # closing-edge weight.
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10_000
    )
    return first_solution, distance_of_first_solution


generate_first_solution = A  # name used by the driver code in this module
def A ( solution , dict_of_neighbours ):
    """Enumerate 2-swap neighbours of ``solution``, cheapest first.

    Every pair of interior nodes is swapped; each candidate tour is appended
    with its total distance as the last element, and the list is sorted by
    that distance. Fixes the original's duplicate-parameter SyntaxError, the
    unbound ``_tmp`` local, and the sort lambda that read an undefined ``x``.
    """
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n
            # Re-cost the candidate tour edge by edge.
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


find_neighborhood = A  # name used by tabu_search in this module
def A ( first_solution , distance_of_first_solution , dict_of_neighbours , iters , size ):
    """Run tabu search for ``iters`` iterations with a tabu list of ``size``.

    Starting from the greedy tour, each iteration moves to the best 2-swap
    neighbour whose exchanged node pair is not tabu, then records that pair.
    Returns ``(best_solution_ever, best_cost)``. Fixes the original's five
    duplicate parameters (a SyntaxError) and the collapsed local names;
    parameter order matches the call in the module's entry point.
    """
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1  # last slot holds the tour cost
        found = False
        while not found:
            # Locate the first position where candidate and current differ:
            # that is the exchanged node pair for the tabu check.
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]  # strip the trailing cost
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                # Tabu move: fall back to the next-best neighbour.
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list) >= size:
            tabu_list.pop(0)
        count = count + 1
    return best_solution_ever, best_cost


tabu_search = A  # name used by the entry point in this module
def A ( _lowerCamelCase=None ):
    """Entry point: parse the graph, build a greedy tour, then run tabu search.

    ``_lowerCamelCase`` is the parsed argparse namespace with ``File``,
    ``Iterations`` and ``Size`` attributes. Fixes the original, which ignored
    its parameter and read an undefined global ``args``.
    """
    args = _lowerCamelCase
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours)
    best_sol, best_cost = tabu_search(
        first_solution, distance_of_first_solution, dict_of_neighbours, args.Iterations, args.Size, )
    print(F"Best solution: {best_sol}, with total distance: {best_cost}.")


main = A  # conventional alias for the script entry point
if __name__ == "__main__":
    # Fixed: the parser was bound to a throwaway name while the add_argument
    # calls below read ``parser`` (a NameError).
    parser = argparse.ArgumentParser(description="Tabu Search")
    parser.add_argument(
        "-f",
        "--File",
        type=str,
        help="Path to the file containing the data",
        required=True,
    )
    parser.add_argument(
        "-i",
        "--Iterations",
        type=int,
        help="How many iterations the algorithm should perform",
        required=True,
    )
    parser.add_argument(
        "-s", "--Size", type=int, help="Size of the tabu list", required=True
    )
    # Pass the arguments to the entry point (named ``A`` in this module).
    A(parser.parse_args())
| 36
| 0
|
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
A__ = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    """Tests for the slow/fast Pegasus tokenizers on a tiny SentencePiece fixture.

    Method, attribute and local names are reconstructed: automated renaming had
    given every method the same name (shadowing all but the last) and dropped
    the fixture reference. The mixin base comes from this file's imports and
    the fixture path is the module-level ``A__``; flag names follow the
    tokenizer-mixin convention — TODO confirm against the original test file.
    """

    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing (path held in A__).
        tokenizer = PegasusTokenizer(A__)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("""google/pegasus-large""")

    def get_tokenizer(self, **kwargs):
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        token = """</s>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], """<pad>""")
        self.assertEqual(vocab_keys[1], """</s>""")
        self.assertEqual(vocab_keys[-1], """v""")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            """Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
            """ </s> <pad> <pad> <pad>"""
        )
        # NOTE(review): the renamer erased these keyword values; None/False is
        # the conventional pairing for this comparison — confirm upstream.
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = """To ensure a smooth flow of bank resolutions."""
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_seq2seq_max_length(self):
        src_texts = ["""This is going to be way too long.""" * 150, """short example"""]
        tgt_texts = ["""not super long but more than 5 tokens""", """tiny"""]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="""pt""")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="""pt""")
        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"""input_ids""": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="""google/bigbird-pegasus-large-arxiv""", revision="""ba85d0851d708441f91440d509690f1ab6353415""", )
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    """Tests for the BigBird-Pegasus flavour of the Pegasus tokenizer (no
    sentence mask token, zero offset, ``[MASK]`` as mask token).

    Names reconstructed as in the class above; the fixture path is the
    module-level ``A__`` — TODO confirm against the original test file.
    """

    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing (path held in A__).
        # NOTE(review): the renamer erased the mask_token_sent value; None is
        # the conventional choice for the BigBird variant — confirm upstream.
        tokenizer = PegasusTokenizer(A__, offset=0, mask_token_sent=None, mask_token="""[MASK]""")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""")

    def get_tokenizer(self, **kwargs):
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            """Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
            """ <pad> <pad> <pad>"""
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["""This is going to be way too long.""" * 1000, """short example"""]
        tgt_texts = ["""not super long but more than 5 tokens""", """tiny"""]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="""pt""")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="""pt""")
        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        test_str = (
            """This is an example string that is used to test the original TF implementation against the HF"""
            """ implementation"""
        )
        token_ids = self._large_tokenizer(test_str).input_ids
        self.assertListEqual(
            token_ids, [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1], )
| 82
|
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
class UpperCAmelCase_ ( TokenizerTesterMixin , unittest.TestCase):
    """Tests for BartphoTokenizer on a tiny BPE fixture.

    Fixes from the automated renaming: the mixin base ``a`` was undefined
    (this file imports TokenizerTesterMixin), ``get_tokenizer`` declared
    ``**__a`` but read ``kwargs``, the fixture path ``_snake_case`` was dropped
    inside methods, and every method shared one name. Flag/attribute names
    follow the tokenizer-mixin convention — TODO confirm upstream.
    """

    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        # _snake_case holds the module-level SentencePiece fixture path.
        tokenizer = BartphoTokenizer(_snake_case, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(_snake_case, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 36
| 0
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ : Tuple = logging.get_logger(__name__)
snake_case_ : Dict = torch.device('cpu')
def A__ ( ):
    """Download the standard COCO test image used for conversion sanity checks.

    Fixes the original, which took no parameters but read the undefined name
    ``UpperCAmelCase_`` for both the URL and the ``stream`` flag.
    """
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    # stream=True so PIL can read straight from the response body.
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def A__ ( UpperCAmelCase_ ):
    """Return the expected first-five logits for a given SwiftFormer variant.

    Raises ValueError for unknown names instead of silently returning None
    (the original fell through every branch without an else).
    """
    if UpperCAmelCase_ == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif UpperCAmelCase_ == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif UpperCAmelCase_ == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif UpperCAmelCase_ == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
    raise ValueError(f"Unknown SwiftFormer model name: {UpperCAmelCase_}")
def A__ ( dct , old_key , new_key ):
    """Move ``dct[old_key]`` to ``dct[new_key]`` in place.

    Fixes the original signature, which declared the same parameter name three
    times (a SyntaxError).
    """
    val = dct.pop(old_key)
    dct[new_key] = val
def A__ ( UpperCAmelCase_ ):
    """Map original SwiftFormer checkpoint keys to the HF naming scheme.

    Returns a list of ``(old_key, new_key)`` pairs in state-dict order. Fixes
    the original's NameError: the accumulator was bound to a renamed variable
    but appended through the undefined ``rename_keys``.
    """
    rename_keys = []
    for k in UpperCAmelCase_.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace('.pwconv', '.point_wise_conv')
        if ".dwconv" in k:
            k_new = k_new.replace('.dwconv', '.depth_wise_conv')
        if ".Proj." in k:
            k_new = k_new.replace('.Proj.', '.proj.')
        if "patch_embed" in k_new:
            k_new = k_new.replace('patch_embed', 'swiftformer.patch_embed.patch_embedding')
        if "network" in k_new:
            ls = k_new.split('.')
            if ls[2].isdigit():
                # network.<stage>.<block>.<rest> -> nest under .blocks.
                k_new = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:])
            else:
                k_new = k_new.replace('network', 'swiftformer.encoder.network')
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    """Convert an original SwiftFormer checkpoint to HuggingFace format.

    Loads the original weights, renames keys to the HF layout, checks the
    converted model against reference logits and saves it to
    ``pytorch_dump_folder_path``. The obfuscated version bound ``config``,
    ``checkpoint`` and other locals to throwaway names, so later references
    (e.g. ``checkpoint`` at the old L8700) raised ``NameError``.
    """
    config = SwiftFormerConfig()
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]
    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location='cpu')
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)
    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)
    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained('preprocessor_config')
    inputs = processor(images=image, return_tensors='pt')
    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs['pixel_values']).logits
    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model {swiftformer_name} to {pytorch_dump_folder_path}')
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion. The obfuscated
    # version bound the parser/args to throwaway names, so ``parser`` and
    # ``args`` were undefined below.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--swiftformer_name',
        default='swiftformer_xs',
        choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
        type=str,
        help='Name of the SwiftFormer model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='./converted_outputs/',
        type=str,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
    args = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 83
|
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
_snake_case = logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple):
    """Compute the DPT resize target ``(new_height, new_width)``.

    Optionally preserves the aspect ratio (scaling as little as possible) and
    constrains both dimensions to a multiple of ``multiple``. The obfuscated
    version had four identical parameter names (a SyntaxError) and never bound
    ``input_height``/``output_width`` etc., so the body could not run.
    """

    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        # Round to the nearest multiple, falling back to floor/ceil to respect bounds.
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)
    return (new_height, new_width)
class UpperCAmelCase_ ( BaseImageProcessor):
    """DPT-style image processor: resize (optionally multiple-of constrained),
    rescale and normalize images, plus semantic-segmentation post-processing.

    Restored from the obfuscated version, which subclassed an undefined name,
    had duplicate parameter names (SyntaxError), never set any ``self.*``
    attribute, and gave every method the same name so only the last survived
    while ``preprocess`` called ``self.resize``/``self.rescale``/``self.normalize``.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize = True,
        size = None,
        resample = PILImageResampling.BILINEAR,
        keep_aspect_ratio = False,
        ensure_multiple_of = 1,
        do_rescale = True,
        rescale_factor = 1 / 255,
        do_normalize = True,
        image_mean = None,
        image_std = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, keep_aspect_ratio = False, ensure_multiple_of = 1, resample = PILImageResampling.BICUBIC, data_format = None, **kwargs):
        """Resize ``image`` to ``size`` (dict with ``height``/``width``), respecting
        aspect-ratio/multiple constraints via ``get_resize_output_image_size``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image, output_size=(size["height"], size["width"]), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of, )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format = None, **kwargs):
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format = None, **kwargs):
        """Normalize ``image`` with per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize = None,
        size = None,
        keep_aspect_ratio = None,
        ensure_multiple_of = None,
        resample = None,
        do_rescale = None,
        rescale_factor = None,
        do_normalize = None,
        image_mean = None,
        image_std = None,
        return_tensors = None,
        data_format = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Run the configured pipeline over one image or a list of images and
        return a ``BatchFeature`` with ``pixel_values``. Per-call arguments
        override the instance defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        # NOTE: parenthesized to fix the original precedence bug
        # (`do_resize and size is None or resample is None`).
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes = None):
        """Turn model ``outputs.logits`` into per-image segmentation maps,
        optionally resizing each map to the corresponding ``target_sizes`` entry."""
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
| 36
| 0
|
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _SCREAMING_SNAKE_CASE ( A__ ):
@staticmethod
@abstractmethod
def __lowerCAmelCase ( __A ) -> Optional[int]:
raise NotImplementedError()
@abstractmethod
def __lowerCAmelCase ( self ) -> List[Any]:
raise NotImplementedError()
| 84
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def get_config(model_name):
    """Build a ``BitConfig`` for ``model_name`` with ImageNet-1k labels.

    Called as ``get_config`` by the conversion routine below. The obfuscated
    version referenced the undefined ``model_name`` and passed mangled kwargs
    (``idalabel``/``labelaid``) that ``BitConfig`` does not accept.
    """
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = "std_conv" if "bit" in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer, num_labels=1_000, id2label=id2label, label2id=label2id, )
    return config
def rename_key(name):
    """Map a timm BiT state-dict key onto the HuggingFace naming scheme.

    The obfuscated version assigned each replacement to a throwaway local, so
    the function always returned its input unchanged — replacements must
    accumulate in ``name``.
    """
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
def prepare_img():
    """Download and return the standard COCO sanity-check image.

    Called as ``prepare_img()`` by the conversion routine. The obfuscated
    version referenced an undefined local for the URL.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True lets PIL read directly from the HTTP response.
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Convert a timm BiT checkpoint to HuggingFace format and verify it.

    Copies the timm weights under renamed keys, builds a matching image
    processor from the timm transform pipeline, checks pixel values and logits
    agree, then optionally saves and/or pushes the result. The obfuscated
    version never bound ``timm_transforms``/``pillow_resamplings`` and friends,
    so the body raised ``NameError``.
    """
    config = get_config(model_name)
    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        # head weights are stored with trailing singleton dims in timm
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val
    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # create image processor mirroring the timm eval transform
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True, size={"shortest_edge": timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=True, crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]}, do_normalize=True, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits
    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(F"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(F"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(F"ybelkada/{model_name}")
        processor.push_to_hub(F"ybelkada/{model_name}")
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion. The obfuscated
    # version bound the parser/args to throwaway names, leaving ``parser`` and
    # ``args`` undefined below.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="resnetv2_50x1_bitm",
        type=str,
        help="Name of the BiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model to the hub.",
    )
    args = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 36
| 0
|
'''simple docstring'''
import heapq
import sys
import numpy as np
_SCREAMING_SNAKE_CASE : Optional[int] = tuple[int, int]
class _snake_case:
    """Priority queue with updatable priorities, used by the multi-heuristic A*.

    Backed by ``heapq`` plus a membership ``set``. Restored from the obfuscated
    version in which every method shared one name (so callers' ``put``/``minkey``
    etc. did not exist) and heappop targets were bound to throwaway names.
    """

    def __init__(self):
        self.elements = []   # heap of (priority, item) pairs
        self.set = set()     # items currently in the queue

    def minkey(self):
        """Smallest priority currently queued, or +inf when empty."""
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        """Insert ``item`` or update its priority if already present."""
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update: pop entries until we find ``item``, then re-push everything
            # with the new priority in place.
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for (pro, xxx) in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        """Remove ``item`` from the queue if present (no-op otherwise)."""
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for (prito, yyy) in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        """Item with the smallest priority, without removing it."""
        return self.elements[0][1]

    def get(self):
        """Pop and return the ``(priority, item)`` pair with the smallest priority."""
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)


# The search code below refers to this queue as ``PriorityQueue``.
PriorityQueue = _snake_case
def UpperCamelCase_( snake_case : TPos , snake_case : TPos ):
'''simple docstring'''
snake_case_ = np.array(snake_case )
snake_case_ = np.array(snake_case )
return np.linalg.norm(a - b )
def heuristic_a(p: TPos, goal: TPos):
    """Inadmissible heuristic: consistent heuristic floor-divided by the global
    counter ``t`` (so it shrinks as the search progresses).

    Named ``heuristic_a`` because the heuristic table below refers to it by
    that name; the obfuscated version had duplicate parameter names.
    """
    return consistent_heuristic(p, goal) // t
def UpperCamelCase_( snake_case : TPos , snake_case : TPos ):
'''simple docstring'''
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def key(start: TPos, i: int, goal: TPos, g_function: dict):
    """Priority key for queue ``i``: ``g(start) + Wa * h_i(start, goal)``.

    Uses the module-level ``Wa`` weight and ``heuristics`` table. Called as
    ``key`` throughout the search; the obfuscated version had four identical
    parameter names (a SyntaxError).
    """
    ans = g_function[start] + Wa * heuristics[i](start, goal)
    return ans
def do_something(back_pointer, goal, start):
    """Render the grid with the found path, print the path, and exit.

    Uses module globals ``n`` (grid size) and ``blocks`` (obstacles). Restored
    from the obfuscated version whose grid writes were bound to throwaway
    names; terminates the program via ``sys.exit()`` as before.
    """
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"
    for i in range(n):
        for j in range(n):
            # Grid rows are printed top-down, so y is flipped relative to i.
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"
    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"
    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p: TPos):
    """Return True when position ``p`` lies inside the ``n`` x ``n`` grid.

    Called as ``valid`` by ``expand_state``; the obfuscated version's body
    referenced ``p`` while the parameter was named differently.
    """
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer):
    """Expand node ``s`` taken from queue ``j``: relax its four neighbours and
    re-queue improved ones on the anchor and inadmissible queues.

    Uses module globals ``blocks``, ``n_heuristic``, ``goal`` and ``Wa``.
    Restored from the obfuscated version whose eight parameters all shared one
    name (a SyntaxError).
    """
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)
    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")
            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                if neighbours not in close_list_inad:
                    for var in range(1, n_heuristic):
                        if key(neighbours, var, goal, g_function) <= Wa * key(
                            neighbours, 0, goal, g_function):
                            open_list[j].put(
                                neighbours, key(neighbours, var, goal, g_function))
def make_common_ground():
    """Build the denser obstacle layout (several rectangular regions plus an
    L-shaped block) used as an alternative to ``blocks_blk``.

    The obfuscated version bound the accumulator to a throwaway name while
    appending to the undefined ``some_list``.
    """
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))
    for x in range(15, 20):
        some_list.append((x, 17))
    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))
    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list
# Search configuration. The obfuscated version assigned everything to one
# repeatedly-shadowed name while the search functions reference ``heuristics``,
# ``blocks``, ``Wa``, ``n``, ``n_heuristic``, ``start``, ``goal`` and ``t``.
# Index 0 of the heuristic table must be the consistent (anchor) heuristic.
heuristics = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
# Simple obstacle layout: a vertical wall at y = 1.
blocks_blk = [
    (0, 1),
    (1, 1),
    (2, 1),
    (3, 1),
    (4, 1),
    (5, 1),
    (6, 1),
    (7, 1),
    (8, 1),
    (9, 1),
    (10, 1),
    (11, 1),
    (12, 1),
    (13, 1),
    (14, 1),
    (15, 1),
    (16, 1),
    (17, 1),
    (18, 1),
    (19, 1),
]
blocks_all = make_common_ground()
blocks = blocks_blk
# hyper parameters
Wa = 1  # weight applied to heuristics and anchor-queue comparisons
n = 20  # grid is n x n
n_heuristic = 3  # one consistent and two other inconsistent
# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)
t = 1  # global expansion counter, incremented inside multi_a_star
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    """Run multi-heuristic A* from ``start`` to ``goal`` with ``n_heuristic``
    open lists (queue 0 is the consistent anchor).

    On success ``do_something`` prints the path and exits the process; if the
    anchor queue drains, the fallback grid below is printed instead. Restored
    from the obfuscated version whose three parameters shared one name and
    whose locals (``g_function``, ``open_list``, ...) were never bound.
    """
    global t
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()
    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))
    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= Wa * open_list[0].minkey():
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s, i, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer, )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[0].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s, 0, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer, )
                    close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
# Script entry point: run the multi-heuristic A* search with the module-level
# grid configuration (``start``, ``goal`` and ``n_heuristic`` defined above).
if __name__ == "__main__":
    multi_a_star(start, goal, n_heuristic)
| 85
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class UpperCAmelCase_ ( BackboneConfigMixin, PretrainedConfig):
    """Configuration for a Swin Transformer model.

    Restored from the obfuscated version, which listed the undefined base ``a``
    twice, declared every ``__init__`` parameter under one name (a SyntaxError)
    and never set any ``self.*`` attribute. ``model_type``/``attribute_map``
    are the names ``PretrainedConfig`` machinery relies on.
    """

    model_type = 'swin'
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=None,
        num_heads=None,
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # None-sentinels avoid mutable list defaults; effective defaults unchanged.
        depths = [2, 2, 6, 2] if depths is None else depths
        num_heads = [3, 6, 12, 24] if num_heads is None else num_heads
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
class UpperCAmelCase_ ( OnnxConfig):
    """ONNX export configuration for Swin.

    Restored from the obfuscated version, which subclassed the undefined ``a``
    and gave both properties one name (so only the second survived).
    ``inputs``/``atol_for_validation`` are the names ``OnnxConfig`` expects.
    """

    # Minimum torch version required for a correct ONNX export.
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self):
        """Dynamic-axis spec of the single ``pixel_values`` input."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self):
        """Absolute tolerance used when validating the exported model."""
        return 1E-4
| 36
| 0
|
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
lowerCamelCase__ = WebClient(token=os.environ["""CI_SLACK_BOT_TOKEN"""])
def handle_test_results(test_results):
    """Parse a pytest summary string into ``(failed, success, time_spent)``.

    The obfuscated version's body referenced ``test_results``/``failed``/
    ``success``/``time_spent`` while they were bound under throwaway names,
    and the two module helpers shared one name, shadowing each other.
    """
    expressions = test_results.split(' ')
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if '=' in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])
    return failed, success, time_spent
def extract_first_line_failure(failures_short_lines):
    """Map each failing doctest file to the first line of its error message.

    Scans ``pytest --tb=short``-style output: a ``_ [doctest] <file>`` header
    opens an error section; the first non-line-numbered line after it is the
    error. The obfuscated version assigned ``failures[file] = line`` to a
    throwaway local, so it always returned an empty dict.
    """
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split('\n'):
        if re.search(r'_ \[doctest\]', line):
            in_error = True
            file = line.split(' ')[2]
        elif in_error and not line.split(' ')[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class A__ :
    """Builds and posts the Slack report for the doctest CI run.

    Restored from the obfuscated version, in which ``__init__`` never set any
    ``self.*`` attribute and every method shared one mangled name, while the
    bodies reference ``self.time``, ``self.header``, ``self.payload``,
    ``self.get_reply_blocks`` etc.
    """

    def __init__(self, title, doc_test_results):
        self.title = title
        # e.g. "1:02:03, ..." -> keep only the first duration.
        self._time_spent = doc_test_results['time_spent'].split(',')[0]
        self.n_success = doc_test_results['success']
        self.n_failures = doc_test_results['failures']
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

    @property
    def time(self):
        """Total run time formatted as ``XhYmZs``."""
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(':')
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds
        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"

    @property
    def header(self):
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self):
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self):
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def category_failures(self):
        # NOTE(review): reads the module-level ``doc_test_results`` mapping
        # (category -> {"failed": [...]}), not the instance copy.
        line_length = 40
        category_failures = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(v, dict)}
        report = ''
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"
        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def payload(self):
        """The full Slack blocks payload, serialized to a JSON string."""
        blocks = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures)
        if self.n_failures > 0:
            blocks.extend([self.category_failures])
        if self.n_failures == 0:
            blocks.append(self.no_failures)
        return json.dumps(blocks)

    @staticmethod
    def error_out():
        """Post a generic "something went wrong" message to the daily channel."""
        payload = [
            {
                'type': 'section',
                'text': {
                    'type': 'plain_text',
                    'text': 'There was an issue running the tests.',
                },
                'accessory': {
                    'type': 'button',
                    'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
                    'url': f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]
        print('Sending the following payload')
        # ``payload`` is already a Python list here, so dump it directly.
        print(json.dumps({'blocks': payload}))
        client.chat_postMessage(
            channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'], text='There was an issue running the tests.', blocks=payload, )

    def post(self):
        """Post the main report and remember the thread timestamp for replies."""
        print('Sending the following payload')
        print(json.dumps({'blocks': json.loads(self.payload)}))
        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else 'All tests passed.'
        self.thread_ts = client.chat_postMessage(
            channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'], blocks=self.payload, text=text, )

    def get_reply_blocks(self, job_name, job_link, failures, text):
        """Build the Slack blocks for one job's failure reply."""
        failures_text = ''
        for key, value in failures.items():
            # Slack limits block text size; truncate long error messages.
            value = value[:200] + ' [Truncated]' if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"
        title = job_name
        content = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}
        if job_link is not None:
            content['accessory'] = {
                'type': 'button',
                'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
                'url': job_link,
            }
        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self):
        """Post one threaded reply per failing job, attached to the main post."""
        if self.thread_ts is None:
            raise ValueError('Can only post reply if a post has been made.')
        job_link = self.doc_test_results.pop('job_link')
        self.doc_test_results.pop('failures')
        self.doc_test_results.pop('success')
        self.doc_test_results.pop('time_spent')
        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result['failures']):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result['failures']
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)
                print('Sending the following reply')
                print(json.dumps({'blocks': blocks}))
                client.chat_postMessage(
                    channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'], text=f"Results for {job}", blocks=blocks, thread_ts=self.thread_ts['ts'], )
                time.sleep(1)
def get_job_links():
    """Return ``{job name: job html url}`` for every job of the current GitHub Actions run.

    Reads the run id from the GITHUB_RUN_ID env variable and pages through the
    GitHub REST API (100 jobs per page). Returns {} if anything goes wrong.
    NOTE(review): renamed from the obfuscated ``__lowerCAmelCase`` to match the call
    site in the ``__main__`` block; the body referenced the undefined name
    ``_UpperCamelCase`` where the url / loop bound / exception were meant.
    """
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}
    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        # The first 100 jobs were fetched above; total_count covers all pages.
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)
    return {}
def retrieve_artifact(name):
    """Read every UTF-8 text file in directory ``name`` into a dict.

    Keys are file names without their extension; values are the file contents.
    Returns {} when the directory does not exist. Raises ValueError (chained from
    UnicodeDecodeError) for files that are not valid UTF-8.
    NOTE(review): renamed from the obfuscated ``__lowerCAmelCase`` to match the call
    site; the incoming body read each file into a throwaway local, so the returned
    dict was always empty.
    """
    _artifact = {}
    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e
    return _artifact
def retrieve_available_artifacts():
    """Scan the current working directory and return ``{directory name: Artifact}``.

    Each immediate subdirectory is treated as one downloaded CI artifact.
    NOTE(review): renamed from the obfuscated ``__lowerCAmelCase`` to match the call
    site; the nested class never stored its attributes and was named ``A__`` while
    the loop instantiated ``Artifact`` and called ``add_path`` — restored so the
    loop actually works.
    """

    class Artifact:
        def __init__(self, name):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}
    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)
        _available_artifacts[artifact_name].add_path(directory)
    return _available_artifacts
if __name__ == "__main__":
    # Entry point: collect doc-test artifacts from the CI run, aggregate failures
    # per documentation category, and post the report (plus threaded replies) to Slack.
    # NOTE(review): throughout this script every assignment target was renamed to the
    # single name ``lowerCamelCase__``, so the names read later (``docs``,
    # ``github_actions_job_links``, ``available_artifacts``, ``artifact_path``,
    # ``artifact``, ``failed``, ``success``, ``time_spent``, ``all_failures``,
    # ``file_path``, ``test``, ``category``, ``failure``, ``message``) are undefined
    # at their use sites. The original bindings must be restored before this can run.
    lowerCamelCase__ = get_job_links()
    lowerCamelCase__ = retrieve_available_artifacts()
    # Map doc-file glob patterns to human-readable category names.
    lowerCamelCase__ = collections.OrderedDict(
        [
            ("""*.py""", """API Examples"""),
            ("""*.md""", """MD Examples"""),
        ]
    )
    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    lowerCamelCase__ = {
        v: {
            """failed""": [],
            """failures""": {},
        }
        for v in docs.values()
    }
    # Link to the GitHub Action job
    lowerCamelCase__ = github_actions_job_links.get("""run_doctests""")
    lowerCamelCase__ = available_artifacts["""doc_tests_gpu_test_reports"""].paths[0]
    lowerCamelCase__ = retrieve_artifact(artifact_path["""name"""])
    if "stats" in artifact:
        # Overall failed/success counts and wall time from the pytest stats artifact.
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = handle_test_results(artifact["""stats"""])
        lowerCamelCase__ = failed
        lowerCamelCase__ = success
        # Strip the surrounding brackets from the time string and add a separator.
        lowerCamelCase__ = time_spent[1:-1] + """, """
        lowerCamelCase__ = extract_first_line_failure(artifact["""failures_short"""])
        for line in artifact["summary_short"].split("""\n"""):
            if re.search("""FAILED""", line):
                lowerCamelCase__ = line.replace("""FAILED """, """""")
                lowerCamelCase__ = line.split()[0].replace("""\n""", """""")
                if "::" in line:
                    # pytest test ids look like ``file_path::test_name``.
                    lowerCamelCase__ , lowerCamelCase__ = line.split("""::""")
                else:
                    lowerCamelCase__ , lowerCamelCase__ = line, line
                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        # First matching glob decides the doc-test category.
                        lowerCamelCase__ = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)
                        lowerCamelCase__ = all_failures[test] if test in all_failures else """N/A"""
                        lowerCamelCase__ = failure
                        break
    lowerCamelCase__ = Message("""🤗 Results of the doc tests.""", doc_test_results)
    message.post()
    message.post_reply()
| 86
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
    # Guard: the real pipelines need both transformers (>= 4.25.0) and torch.
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Fall back to dummy placeholder objects that raise a helpful error when used.
    from ...utils.dummy_torch_and_transformers_objects import (
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
    )
else:
    # Dependencies available: expose the real Versatile Diffusion implementations.
    from .modeling_text_unet import UNetFlatConditionModel
    from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
    from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
    from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 36
| 0
|
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case_ ( __A ):
    """Test-suite for ``UnCLIPScheduler``.

    NOTE(review): in the incoming code every method shared the single name
    ``__UpperCamelCase`` (so later definitions silently replaced earlier ones) and
    many locals were bound to throwaway names. Methods were given unique
    pytest-style names, and the class attribute is ``scheduler_classes`` because the
    method bodies read ``self.scheduler_classes[0]``.
    """

    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Return a default UnCLIP scheduler config, updated with any overrides."""
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                # prev_timestep must precede time_step to be meaningful.
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)
        predicted_variance = 0.5
        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(25)
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        # Not applicable to UnCLIPScheduler.
        pass

    def test_add_noise_device(self):
        # Not applicable to UnCLIPScheduler.
        pass
| 87
|
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
# Mapping from the comparison operators accepted in pip-style requirement strings
# (e.g. "pkg>=1.2") to the corresponding comparison functions.
# NOTE(review): the functions below read this table as ``ops``; the incoming code
# bound it only to ``_snake_case``, which is kept as a backward-compatible alias.
ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}
_snake_case = ops
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    """Compare an installed version against a wanted one using operator string ``op``.

    Raises:
        ValueError: if either version is None (cannot be compared).
        ImportError: if the installed version does not satisfy the requirement.

    NOTE(review): renamed from the obfuscated ``A`` — the callers in this module
    invoke ``_compare_versions`` by name; the old binding is preserved below.
    """
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


A = _compare_versions  # backward-compatible alias for the old module-level name
def require_version(requirement, hint=None):
    """Check that a pip-style requirement string is satisfied by the environment.

    Accepts either a bare package name ("tokenizers") or a versioned spec
    ("tokenizers>=0.11,<0.13"); "python" is checked against sys.version_info.

    Raises:
        ValueError: malformed requirement, unknown operator, or incomparable versions.
        importlib.metadata.PackageNotFoundError: package is not installed.
        ImportError: installed version does not satisfy the requirement.

    NOTE(review): renamed from the obfuscated ``A``; the incoming body bound
    ``pkg``/``op``/``want_ver``/``wanted`` to throwaway names and matched the inner
    regex against ``requirement`` instead of each range element — restored.
    """
    hint = f"\n{hint}" if hint is not None else ""
    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys() )}, but got {op}")

    # special case: the interpreter itself
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


A = require_version  # backward-compatible alias for the old module-level name
def require_version_core(requirement):
    """``require_version`` with a hint pointing at the transformers dev install.

    NOTE(review): renamed from the obfuscated ``A``; the incoming body computed the
    hint, discarded it, and passed the requirement as its own hint — the hint is now
    actually forwarded.
    """
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)


A = require_version_core  # backward-compatible alias for the old module-level name
| 36
| 0
|
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the SentencePiece-with-byte-fallback fixture used by the tokenizer tests.
# NOTE(review): the test methods reference ``UpperCamelCase__`` where this constant
# is presumably meant to be used — the original binding name appears to be lost.
__lowerCAmelCase : Any = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( _A , unittest.TestCase ):
    '''Test-suite for the GPT-SW3 tokenizer (``GPTSwaTokenizer``), driven by a small
    SentencePiece fixture with byte-fallback.

    NOTE(review): the four class attributes below all share the single name ``a__``
    (only the last assignment, False, survives), and every method is named
    ``_lowercase`` (later definitions overwrite earlier ones), so most of this suite
    is currently unreachable. Several bodies also read names that are never bound in
    their scope (``UpperCamelCase__``, ``tokenizer``, ``input_text``/``output_text``,
    ``vocab_keys``) — all of this looks like mechanical-rename damage; restore the
    original identifiers before relying on these tests.
    '''
    a__ = GPTSwaTokenizer
    a__ = False
    a__ = True
    a__ = False
    def _lowercase ( self : Any ) -> List[str]:
        """Build a tokenizer from the SentencePiece fixture and save it to tmpdirname."""
        super().setUp()
        # We have a SentencePiece fixture for testing
        __magic_name__ = GPTSwaTokenizer(UpperCamelCase__ , eos_token="""<unk>""" , bos_token="""<unk>""" , pad_token="""<unk>""" )
        tokenizer.save_pretrained(self.tmpdirname )
    def _lowercase ( self : int , UpperCamelCase__ : List[str] ) -> Dict:
        """Return an (input_text, output_text) pair for the common round-trip tests."""
        __magic_name__ = """This is a test"""
        __magic_name__ = """This is a test"""
        return input_text, output_text
    def _lowercase ( self : Optional[int] ) -> Tuple:
        """Check token<->id conversion for the ``<s>`` token (expected id 1)."""
        __magic_name__ = """<s>"""
        __magic_name__ = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
    def _lowercase ( self : Optional[Any] ) -> List[Any]:
        """Spot-check the vocabulary: first/second/last entries and total size (2000)."""
        __magic_name__ = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<unk>""" )
        self.assertEqual(vocab_keys[1] , """<s>""" )
        self.assertEqual(vocab_keys[-1] , """j""" )
        self.assertEqual(len(UpperCamelCase__ ) , 2000 )
    def _lowercase ( self : Dict ) -> Any:
        """Check the reported vocab size."""
        self.assertEqual(self.get_tokenizer().vocab_size , 2000 )
    def _lowercase ( self : Any ) -> str:
        """Tokenize sample sentences; verify tokens, ids and byte-fallback pieces."""
        __magic_name__ = GPTSwaTokenizer(UpperCamelCase__ )
        __magic_name__ = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(UpperCamelCase__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [465, 287, 265, 631, 842] )
        __magic_name__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        # fmt: off
        self.assertListEqual(
            UpperCamelCase__ , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] , )
        # fmt: on
        __magic_name__ = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
        self.assertListEqual(
            UpperCamelCase__ , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
        __magic_name__ = tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
        # fmt: off
        self.assertListEqual(
            UpperCamelCase__ , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] )
        # fmt: on
    def _lowercase ( self : Optional[int] ) -> Dict:
        """Check encode_fast matches tokenize+convert, and decode_fast round-trips."""
        __magic_name__ = GPTSwaTokenizer(UpperCamelCase__ )
        __magic_name__ = ["""This is a test""", """I was born in 92000, and this is falsé."""]
        __magic_name__ = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]
        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(UpperCamelCase__ , UpperCamelCase__ ):
            self.assertListEqual(tokenizer.encode_fast(UpperCamelCase__ ) , UpperCamelCase__ )
        # Test that decode_fast returns the input text
        for text, token_ids in zip(UpperCamelCase__ , UpperCamelCase__ ):
            self.assertEqual(tokenizer.decode_fast(UpperCamelCase__ ) , UpperCamelCase__ )
    @slow
    def _lowercase ( self : Dict ) -> Optional[Any]:
        """Integration test against the hosted AI-Sweden/gpt-sw3-126m checkpoint."""
        __magic_name__ = [
            """<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""",
            """Hey there, how are you doing this fine day?""",
            """This is a text with a trailing spaces followed by a dot .""",
            """Häj sväjs lillebrör! =)""",
            """Det är inget fel på Mr. Cool""",
        ]
        # fmt: off
        __magic_name__ = {"""input_ids""": [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase__ , model_name="""AI-Sweden/gpt-sw3-126m""" , sequences=UpperCamelCase__ , )
| 88
|
import argparse
from collections import defaultdict
import yaml
# Location of the documentation table-of-contents this script validates/rewrites.
_snake_case = "docs/source/en/_toctree.yml"
def clean_doc_toc(doc_list):
    """Clean one ToC section: dedupe entries, sort by title, put 'Overview' first.

    Args:
        doc_list: list of ToC entries, dicts with (at least) "local" and "title".

    Returns the cleaned, sorted list. Raises ValueError when one "local" key maps to
    several different titles, or when there is more than one overview doc.

    NOTE(review): renamed from the obfuscated ``A`` — the check functions call
    ``clean_doc_toc`` by name. The incoming body built ``defaultdict(doc_list)``
    (not callable) instead of ``defaultdict(int)`` and discarded most bindings.
    """
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)
    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in doc or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError("{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)
    # Sort
    return overview_doc
def check_scheduler_doc(overwrite=False):
    """Check (and optionally fix) the 'Schedulers' section of the docs ToC.

    Loads the ToC from ``_snake_case``, cleans the scheduler entries with
    ``clean_doc_toc`` and, if ``overwrite`` is True, writes the cleaned ToC back;
    otherwise raises ValueError when a difference is found.

    NOTE(review): renamed from the obfuscated ``A`` to match the ``__main__`` call
    site; the incoming body opened the ``overwrite`` bool instead of the ToC path
    and discarded the diff/updated-section bindings.
    """
    with open(_snake_case, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(_snake_case, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
def check_pipeline_doc(overwrite=False):
    """Check (and optionally fix) the 'Pipelines' section of the docs ToC.

    Cleans each pipeline's sub-section and then the overall pipeline list with
    ``clean_doc_toc``. With ``overwrite`` True the fixed ToC is written back to
    ``_snake_case``; otherwise a ValueError is raised when a difference is found.

    NOTE(review): renamed from the obfuscated ``A`` to match the ``__main__`` call
    site; undefined/throwaway bindings from the incoming body were restored.
    """
    with open(_snake_case, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(_snake_case, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    # CLI entry point: with --fix_and_overwrite the ToC is rewritten in place,
    # otherwise inconsistencies raise a ValueError.
    # NOTE(review): the incoming code assigned both the parser and the parsed args
    # to ``_snake_case`` while reading ``parser``/``args`` — restored.
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
| 36
| 0
|
'''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
# NOTE(review): the six module-level constants below are all bound to the single
# name ``__lowerCAmelCase``, so each assignment overwrites the previous one and only
# the last scheduler config survives. They clearly describe distinct checkpoints
# (a 32x32 test UNet, 64x64 and 256x256 UNets, and three scheduler variants);
# restore unique names before using this conversion script.
# 32x32 test UNet configuration (class-conditional, small channel counts).
__lowerCAmelCase = {
    '''sample_size''': 32,
    '''in_channels''': 3,
    '''out_channels''': 3,
    '''layers_per_block''': 2,
    '''num_class_embeds''': 1_000,
    '''block_out_channels''': [32, 64],
    '''attention_head_dim''': 8,
    '''down_block_types''': [
        '''ResnetDownsampleBlock2D''',
        '''AttnDownBlock2D''',
    ],
    '''up_block_types''': [
        '''AttnUpBlock2D''',
        '''ResnetUpsampleBlock2D''',
    ],
    '''resnet_time_scale_shift''': '''scale_shift''',
    '''upsample_type''': '''resnet''',
    '''downsample_type''': '''resnet''',
}
# 64x64 class-conditional UNet configuration (ImageNet-64-sized model).
__lowerCAmelCase = {
    '''sample_size''': 64,
    '''in_channels''': 3,
    '''out_channels''': 3,
    '''layers_per_block''': 3,
    '''num_class_embeds''': 1_000,
    '''block_out_channels''': [192, 192 * 2, 192 * 3, 192 * 4],
    '''attention_head_dim''': 64,
    '''down_block_types''': [
        '''ResnetDownsampleBlock2D''',
        '''AttnDownBlock2D''',
        '''AttnDownBlock2D''',
        '''AttnDownBlock2D''',
    ],
    '''up_block_types''': [
        '''AttnUpBlock2D''',
        '''AttnUpBlock2D''',
        '''AttnUpBlock2D''',
        '''ResnetUpsampleBlock2D''',
    ],
    '''resnet_time_scale_shift''': '''scale_shift''',
    '''upsample_type''': '''resnet''',
    '''downsample_type''': '''resnet''',
}
# 256x256 unconditional UNet configuration (num_class_embeds is None).
__lowerCAmelCase = {
    '''sample_size''': 256,
    '''in_channels''': 3,
    '''out_channels''': 3,
    '''layers_per_block''': 2,
    '''num_class_embeds''': None,
    '''block_out_channels''': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
    '''attention_head_dim''': 64,
    '''down_block_types''': [
        '''ResnetDownsampleBlock2D''',
        '''ResnetDownsampleBlock2D''',
        '''ResnetDownsampleBlock2D''',
        '''AttnDownBlock2D''',
        '''AttnDownBlock2D''',
        '''AttnDownBlock2D''',
    ],
    '''up_block_types''': [
        '''AttnUpBlock2D''',
        '''AttnUpBlock2D''',
        '''AttnUpBlock2D''',
        '''ResnetUpsampleBlock2D''',
        '''ResnetUpsampleBlock2D''',
        '''ResnetUpsampleBlock2D''',
    ],
    '''resnet_time_scale_shift''': '''default''',
    '''upsample_type''': '''resnet''',
    '''downsample_type''': '''resnet''',
}
# Scheduler configuration: 40 training timesteps.
__lowerCAmelCase = {
    '''num_train_timesteps''': 40,
    '''sigma_min''': 0.002,
    '''sigma_max''': 80.0,
}
# Scheduler configuration: 201 training timesteps.
__lowerCAmelCase = {
    '''num_train_timesteps''': 201,
    '''sigma_min''': 0.002,
    '''sigma_max''': 80.0,
}
# Scheduler configuration: 151 training timesteps.
__lowerCAmelCase = {
    '''num_train_timesteps''': 151,
    '''sigma_min''': 0.002,
    '''sigma_max''': 80.0,
}
def str2bool(v):
    """Parse a boolean-ish CLI value (for use as an argparse ``type=``).

    Accepts real bools unchanged and the usual yes/no string spellings; raises
    argparse.ArgumentTypeError for anything else.
    NOTE(review): the incoming code checked ``isinstance(v, v)`` — the second
    argument must be a type (``bool``); also renamed from the duplicated
    ``__lowerCamelCase`` so the function is referencable — confirm against the
    argparse setup further down the file.
    """
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    """Copy one consistency-model ResNet block into diffusers naming.

    Reads ``{old_prefix}.*`` tensors from ``checkpoint`` and stores them under
    ``{new_prefix}.*`` keys in ``new_checkpoint``; with ``has_skip`` the skip
    connection is mapped to the conv shortcut as well. Returns ``new_checkpoint``.

    NOTE(review): the incoming def used one name for all five parameters (a
    SyntaxError) and bound every copied tensor to a throwaway local instead of
    storing it; the target key names were restored from the upstream conversion
    script — confirm against diffusers' UNet state-dict layout.
    """
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    """Copy one consistency-model attention block into diffusers naming.

    Splits the fused ``qkv`` projection into to_q/to_k/to_v, squeezes the trailing
    1x1-conv dims, and maps norm -> group_norm and proj_out -> to_out.0. Returns
    ``new_checkpoint``. ``attention_dim`` is accepted for call-site compatibility
    but is not used here.

    NOTE(review): the incoming def repeated one parameter name (a SyntaxError) and
    bound every converted tensor to a throwaway local instead of storing it; target
    key names restored from the upstream conversion script.
    """
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)
    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]
    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)
    return new_checkpoint
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
    """Convert a consistency-model UNet checkpoint into diffusers naming.

    Walks the down blocks, the hard-coded mid block, and the up blocks, copying
    weights via the resnet/attention converters defined above.

    NOTE(review): most assignment targets in this body have been mangled into a
    throwaway ``_a`` local by an automated rewrite (the originals were
    ``new_checkpoint[...] = ...`` / named locals such as ``checkpoint``), and the
    parameters share one name, which is a SyntaxError. Compare against the
    upstream conversion script before trusting this function.
    """
    # Load the raw checkpoint on CPU and start a fresh mapping.
    _a : List[Any] = torch.load(lowerCAmelCase_ , map_location='cpu' )
    _a : List[Any] = {}
    # Time embedding MLP.
    _a : List[str] = checkpoint['time_embed.0.weight']
    _a : Union[str, Any] = checkpoint['time_embed.0.bias']
    _a : Optional[Any] = checkpoint['time_embed.2.weight']
    _a : str = checkpoint['time_embed.2.bias']
    if unet_config["num_class_embeds"] is not None:
        # Class-conditional models carry an extra label embedding table.
        _a : List[str] = checkpoint['label_emb.weight']
    _a : Optional[Any] = checkpoint['input_blocks.0.0.weight']
    _a : List[Any] = checkpoint['input_blocks.0.0.bias']
    # Layout parameters read from the diffusers-style UNet config dict.
    _a : Optional[int] = unet_config['down_block_types']
    _a : Union[str, Any] = unet_config['layers_per_block']
    _a : Tuple = unet_config['attention_head_dim']
    _a : Dict = unet_config['block_out_channels']
    _a : int = 1
    _a : Dict = channels_list[0]
    for i, layer_type in enumerate(lowerCAmelCase_ ):
        _a : Optional[Any] = channels_list[i]
        # A channel change means the first resnet of the block needs a shortcut conv.
        _a : List[Any] = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(lowerCAmelCase_ ):
                _a : Tuple = f"""down_blocks.{i}.resnets.{j}"""
                _a : Any = f"""input_blocks.{current_layer}.0"""
                _a : str = True if j == 0 and downsample_block_has_skip else False
                _a : Tuple = convert_resnet(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , has_skip=lowerCAmelCase_ )
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(lowerCAmelCase_ ):
                _a : Tuple = f"""down_blocks.{i}.resnets.{j}"""
                _a : List[str] = f"""input_blocks.{current_layer}.0"""
                _a : Optional[Any] = True if j == 0 and downsample_block_has_skip else False
                _a : Any = convert_resnet(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , has_skip=lowerCAmelCase_ )
                _a : List[str] = f"""down_blocks.{i}.attentions.{j}"""
                _a : Optional[int] = f"""input_blocks.{current_layer}.1"""
                _a : List[str] = convert_attention(
                    lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
                current_layer += 1
        if i != len(lowerCAmelCase_ ) - 1:
            # Every down block except the last ends with a downsampler resnet.
            _a : Optional[Any] = f"""down_blocks.{i}.downsamplers.0"""
            _a : Tuple = f"""input_blocks.{current_layer}.0"""
            _a : str = convert_resnet(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
            current_layer += 1
        _a : List[str] = current_channels
    # hardcoded the mid-block for now
    _a : Optional[Any] = 'mid_block.resnets.0'
    _a : str = 'middle_block.0'
    _a : Optional[int] = convert_resnet(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
    _a : Optional[Any] = 'mid_block.attentions.0'
    _a : Any = 'middle_block.1'
    _a : Optional[int] = convert_attention(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
    _a : int = 'mid_block.resnets.1'
    _a : Any = 'middle_block.2'
    _a : Union[str, Any] = convert_resnet(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
    _a : Union[str, Any] = 0
    _a : str = unet_config['up_block_types']
    for i, layer_type in enumerate(lowerCAmelCase_ ):
        if layer_type == "ResnetUpsampleBlock2D":
            # Up blocks carry one extra resnet per block (skip concatenation).
            for j in range(layers_per_block + 1 ):
                _a : int = f"""up_blocks.{i}.resnets.{j}"""
                _a : int = f"""output_blocks.{current_layer}.0"""
                _a : Union[str, Any] = convert_resnet(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , has_skip=lowerCAmelCase_ )
                current_layer += 1
            if i != len(lowerCAmelCase_ ) - 1:
                _a : List[str] = f"""up_blocks.{i}.upsamplers.0"""
                _a : Tuple = f"""output_blocks.{current_layer-1}.1"""
                _a : str = convert_resnet(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1 ):
                _a : Any = f"""up_blocks.{i}.resnets.{j}"""
                _a : Optional[int] = f"""output_blocks.{current_layer}.0"""
                _a : Tuple = convert_resnet(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , has_skip=lowerCAmelCase_ )
                _a : Union[str, Any] = f"""up_blocks.{i}.attentions.{j}"""
                _a : str = f"""output_blocks.{current_layer}.1"""
                _a : Union[str, Any] = convert_attention(
                    lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
                current_layer += 1
            if i != len(lowerCAmelCase_ ) - 1:
                _a : Any = f"""up_blocks.{i}.upsamplers.0"""
                _a : int = f"""output_blocks.{current_layer-1}.2"""
                _a : List[Any] = convert_resnet(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
    # Final output projection (norm weights + conv).
    _a : List[str] = checkpoint['out.0.weight']
    _a : int = checkpoint['out.0.bias']
    _a : Tuple = checkpoint['out.2.weight']
    _a : Tuple = checkpoint['out.2.bias']
    return new_checkpoint
if __name__ == "__main__":
    # CLI entry point: convert a consistency-model `unet.pt` into a saved
    # diffusers ConsistencyModelPipeline (UNet + CM scheduler).
    __lowerCAmelCase = argparse.ArgumentParser()
    parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
    parser.add_argument(
        '''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
    )
    parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
    __lowerCAmelCase = parser.parse_args()
    # NOTE(review): `strabool` is presumably the string-to-bool helper whose tail
    # appears at the top of this file — confirm the name before running.
    __lowerCAmelCase = strabool(args.class_cond)
    __lowerCAmelCase = os.path.basename(args.unet_path)
    print(f"""Checkpoint: {ckpt_name}""")
    # Get U-Net config
    # NOTE(review): the *_UNET_CONFIG / *_SCHEDULER_CONFIG constants are expected
    # to be defined earlier in the full file; they are not visible here.
    if "imagenet64" in ckpt_name:
        __lowerCAmelCase = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        __lowerCAmelCase = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        __lowerCAmelCase = TEST_UNET_CONFIG
    else:
        raise ValueError(f"""Checkpoint type {ckpt_name} is not currently supported.""")
    if not args.class_cond:
        # Unconditional model: drop the class-embedding entry from the config.
        __lowerCAmelCase = None
    __lowerCAmelCase = con_pt_to_diffuser(args.unet_path, unet_config)
    __lowerCAmelCase = UNetaDModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)
    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        __lowerCAmelCase = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        __lowerCAmelCase = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        __lowerCAmelCase = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"""Checkpoint type {ckpt_name} is not currently supported.""")
    __lowerCAmelCase = CMStochasticIterativeScheduler(**scheduler_config)
    __lowerCAmelCase = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
| 89
|
def A(density, bulk_modulus):
    """Return the speed of sound (m/s) in a fluid: sqrt(bulk_modulus / density).

    :param density: fluid density in kg/m^3; must be > 0.
    :param bulk_modulus: fluid bulk modulus in Pa; must be > 0.
    :raises ValueError: if either argument is non-positive.
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 36
| 0
|
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class __lowerCAmelCase ( __magic_name__ ):
    """Fast tokenizer that pairs with the project's CustomTokenizer.

    NOTE(review): the base name ``__magic_name__`` is presumably
    ``BertTokenizerFast`` (imported above) — confirm against the original file.
    """

    # Slow-tokenizer counterpart used for conversion/fallback.
    snake_case_ = CustomTokenizer
    pass
| 90
|
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
_snake_case = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class UpperCAmelCase_(Dataset):
    """Trivial dataset whose item ``i`` is the integer ``i`` itself.

    Used by the distributed-trainer test to verify that every sample comes back
    in order. NOTE(review): the original base name ``a`` stood for
    ``torch.utils.data.Dataset`` (imported above); the original body also stored
    the length into a throwaway local, so ``__len__`` raised AttributeError.
    """

    def __init__(self, length=101):
        # 101 mirrors the upstream test fixture's default dataset size.
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        # Each item is simply its own index.
        return i
class UpperCAmelCase_:
    """Collator for the distributed-trainer test.

    Wraps the raw feature list into fresh ``input_ids`` and ``labels`` tensors
    (one independent tensor per key).
    """

    def __call__(self, __a):
        return {name: torch.tensor(__a) for name in ("input_ids", "labels")}
class UpperCAmelCase_(nn.Module):
    """Toy model for the distributed-trainer test: echoes its input ids.

    NOTE(review): the original method signature repeated the parameter name
    (``__a, __a=None`` — a SyntaxError) while the body referenced ``input_ids``
    and ``labels``; the names are restored here.
    """

    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def snake_case__(self, input_ids, labels=None):
        """Return ``(zero loss, input_ids)`` when labels are given, else ``input_ids``."""
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class UpperCAmelCase_ ( a):
    """Launches this very file under ``torchrun`` on 2 Neuron cores and asserts
    the subprocess exits cleanly (the child performs the real checks).

    NOTE(review): the base ``a`` is presumably ``TestCasePlus`` and the mangled
    locals below were originally ``distributed_args`` / ``output_dir`` /
    ``args`` / ``cmd``; ``execute_subprocess_async(__a, ...)`` references an
    undefined name — confirm against the upstream test file.
    """

    @require_torch_neuroncore
    def snake_case__ ( self):
        """Run the distributed smoke test on two Neuron processes."""
        _lowerCAmelCase : int = f"--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split()
        _lowerCAmelCase : Tuple = self.get_auto_remove_tmp_dir()
        _lowerCAmelCase : Optional[int] = f"--output_dir {output_dir}".split()
        _lowerCAmelCase : List[Any] = ["torchrun"] + distributed_args + args
        execute_subprocess_async(__a, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
class UpperCAmelCase_ ( a):
    """Same distributed smoke test as above, but across all visible CUDA GPUs.

    NOTE(review): base ``a`` is presumably ``TestCasePlus``; several assignment
    targets below were mangled into ``_lowerCAmelCase`` and
    ``execute_subprocess_async(__a, ...)`` references an undefined name —
    confirm against the upstream test file.
    """

    @require_torch_multi_gpu
    def snake_case__ ( self):
        """Run the distributed smoke test with one process per CUDA device."""
        _lowerCAmelCase : Dict = f"--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split()
        _lowerCAmelCase : Any = self.get_auto_remove_tmp_dir()
        _lowerCAmelCase : Optional[int] = f"--output_dir {output_dir}".split()
        _lowerCAmelCase : Any = ["torchrun"] + distributed_args + args
        execute_subprocess_async(__a, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
    # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
    #
    # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    #
    # NOTE(review): nearly every assignment target below was mangled into
    # ``_snake_case`` / ``_lowerCAmelCase`` — the names actually used afterwards
    # (``parser``, ``training_args``, ``dataset``, ``trainer``, ``metrics``,
    # ``p``, ``sequential``, ``success``) are never bound. Restore them from the
    # upstream test file before running.
    _snake_case = HfArgumentParser((TrainingArguments,))
    _snake_case = parser.parse_args_into_dataclasses()[0]
    logger.warning(
        f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
        f'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'''
    )
    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        _snake_case = DummyDataset(dataset_length)

        def A ( _lowerCamelCase ):
            """compute_metrics: every rank must see the full 0..n-1 sequence, in order."""
            _lowerCAmelCase : Dict = list(range(len(_lowerCamelCase ) ) )
            _lowerCAmelCase : Union[str, Any] = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    F"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}" )
            return {"success": success}

        _snake_case = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        _snake_case = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)
        _snake_case = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)
        # Re-run with a smaller per-device batch to exercise uneven sharding.
        _snake_case = 2
        _snake_case = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)
        _snake_case = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)
        _snake_case = None
| 36
| 0
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class lowerCAmelCase__(ProcessorMixin):
    """Whisper processor: wraps a WhisperFeatureExtractor and a WhisperTokenizer
    behind a single callable.

    NOTE(review): the obfuscated original repeated parameter names (SyntaxError)
    and gave four different methods the same name so they shadowed each other;
    upstream attribute and method names are restored here.
    """

    # Names resolved by ProcessorMixin when loading the sub-components.
    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        # Default target of __call__ until a target-processor context is entered.
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        """Forward to the tokenizer's decoder-prompt helper."""
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        """Process ``audio`` with the feature extractor and/or ``text`` with the
        tokenizer; when both are given, attach the token ids as ``labels``."""
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        audio = kwargs.pop('''audio''', None)
        sampling_rate = kwargs.pop('''sampling_rate''', None)
        text = kwargs.pop('''text''', None)
        if len(args) > 0:
            # Positional form: first argument is the audio.
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''')
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings['''input_ids''']
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text, return_tensors="np"):
        """Forward to the tokenizer's prompt-id helper."""
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
| 91
|
from __future__ import annotations
import bisect
def A(sorted_collection, item, lo=0, hi=-1):
    """Leftmost index at which *item* can be inserted into *sorted_collection*
    while keeping it sorted (binary search).

    :param sorted_collection: ascending sequence to search.
    :param item: value to place.
    :param lo: lower bound of the search window.
    :param hi: upper bound; any negative value means "end of collection".
    :return: insertion index (before any equal entries).
    """
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def A(sorted_collection, item, lo=0, hi=-1):
    """Rightmost index at which *item* can be inserted into *sorted_collection*
    while keeping it sorted (binary search).

    :param sorted_collection: ascending sequence to search.
    :param item: value to place.
    :param lo: lower bound of the search window.
    :param hi: upper bound; any negative value means "end of collection".
    :return: insertion index (after any equal entries).
    """
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def A(sorted_collection, item, lo=0, hi=-1):
    """Insert *item* into *sorted_collection* in place, before any equal entries.

    ``hi`` < 0 means "end of collection". Uses the stdlib ``bisect`` module
    (the original called an undefined ``bisect_left`` sibling).
    """
    if hi < 0:
        hi = len(sorted_collection)
    sorted_collection.insert(bisect.bisect_left(sorted_collection, item, lo, hi), item)
def A(sorted_collection, item, lo=0, hi=-1):
    """Insert *item* into *sorted_collection* in place, after any equal entries.

    ``hi`` < 0 means "end of collection". Uses the stdlib ``bisect`` module
    (the original called an undefined ``bisect_right`` sibling).
    """
    if hi < 0:
        hi = len(sorted_collection)
    sorted_collection.insert(bisect.bisect_right(sorted_collection, item, lo, hi), item)
def A(sorted_collection, item):
    """Iterative binary search.

    :param sorted_collection: ascending sequence to search.
    :param item: value to find.
    :return: an index of *item*, or None if absent.
    """
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def A(sorted_collection, item):
    """Binary search via the stdlib ``bisect`` module.

    :return: an index of *item*, or None if absent.
    """
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None
def A(sorted_collection, item, left, right):
    """Recursive binary search over ``sorted_collection[left:right + 1]``.

    :return: an index of *item*, or None if absent.

    The recursion calls this function by its own name (the original referenced
    an undefined ``binary_search_by_recursion``).
    """
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return A(sorted_collection, item, left, midpoint - 1)
    else:
        return A(sorted_collection, item, midpoint + 1, right)
if __name__ == "__main__":
    # Interactive demo: read a sorted list and a target from stdin, then search.
    # NOTE(review): blocks on input(); not intended for automated runs.
    _snake_case = input("Enter numbers separated by comma:\n").strip()
    _snake_case = sorted(int(item) for item in user_input.split(","))
    _snake_case = int(input("Enter a single number to be found in the list:\n"))
    _snake_case = binary_search(collection, target)
    if result is None:
        print(f'''{target} was not found in {collection}.''')
    else:
        print(f'''{target} was found at position {result} in {collection}.''')
| 36
| 0
|
from __future__ import annotations
def _a ( SCREAMING_SNAKE_CASE_ : list[float] ):
if len(SCREAMING_SNAKE_CASE_ ) < 2:
raise ValueError("Monogons and Digons are not polygons in the Euclidean space" )
if any(i <= 0 for i in nums ):
raise ValueError("All values must be greater than 0" )
__lowerCAmelCase = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 92
|
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class UpperCAmelCase_ ( a):
    def snake_case__ ( self, __a):
        """Process one audio sample; this base implementation always yields 0.0.

        NOTE(review): the base ``a`` is presumably ``typing.Protocol`` (imported
        above), making this the structural interface for the filter objects
        passed to the response-plot helpers below.
        """
        return 0.0
def A(fft_results, samplerate):
    """Return (lowest, highest) display bounds for an FFT magnitude plot.

    Only bins 1 .. samplerate//2 - 2 (below Nyquist, excluding DC) are
    considered; the bounds are clamped to at least [-20, 20].
    """
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest
def A(filter_type, samplerate):
    """Plot the gain (dB) response of *filter_type* from its impulse response.

    Fixes vs. the obfuscated original: the duplicated parameter names
    (a SyntaxError) are restored, ``np.logaa`` is corrected to ``np.log10``,
    and the call to the undefined ``get_bounds`` sibling is inlined.
    """
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    # Display within reasonable bounds
    span = fft_db[1 : samplerate // 2 - 1]
    bounds = (min([-20, np.min(span)]), max([20, np.max(span)]))
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")
    plt.plot(fft_db)
    plt.show()
def A(filter_type, samplerate):
    """Plot the phase response (radians) of *filter_type* from its impulse
    response; the duplicated parameter names of the original (a SyntaxError)
    are restored here.
    """
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_phase = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_phase, -2 * pi))
    plt.show()
| 36
| 0
|
'''simple docstring'''
import os
import string
import sys
# Flag OR-ed onto arrow-key codes to distinguish them from plain characters.
_lowercase : str = 1 << 8
# NOTE(review): this dict was originally bound to the name ``KEYMAP`` which the
# functions below read; the obfuscated target ``_lowercase`` leaves KEYMAP
# undefined. Likewise, the two assignments after the dict originally created
# KEYMAP["arrow_begin"] / KEYMAP["arrow_end"], which get_character() depends on.
_lowercase : Tuple = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 2_7,
    "up": 6_5 + ARROW_KEY_FLAG,
    "down": 6_6 + ARROW_KEY_FLAG,
    "right": 6_7 + ARROW_KEY_FLAG,
    "left": 6_8 + ARROW_KEY_FLAG,
    "mod_int": 9_1,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 5_0,
    "delete": 5_1,
    "pg_up": 5_3,
    "pg_down": 5_4,
}
_lowercase : List[str] = KEYMAP["up"]
_lowercase : Any = KEYMAP["left"]
if sys.platform == "win32":
    # Buffer of pending keystrokes plus raw Windows scan-code translations.
    _lowercase : str = []
    _lowercase : List[str] = {
        B"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        B"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        B"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        B"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        B"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        B"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        B"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        B"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }
# NOTE(review): originally registered digit keys as KEYMAP[str(i)] = ord(str(i)).
for i in range(1_0):
    _lowercase : Any = ord(str(i))
def snake_case_ ( ):
    """Read one raw keypress from stdin and return it as a string.

    Windows: uses msvcrt, translating two-byte arrow/navigation scan codes into
    the KEYMAP scheme via a small buffer. POSIX: puts the tty into raw mode for
    a single one-character read. NOTE(review): several names below were mangled
    by an automated rewrite (``__SCREAMING_SNAKE_CASE`` was the module-level
    WIN_CH_BUFFER; the ``lowercase_`` targets were ``ch``/``cha``/``fd``/
    ``old_settings``) — confirm against the upstream accelerate source.
    """
    if os.name == "nt":
        import msvcrt

        lowercase_ : Dict = '''mbcs'''
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(__SCREAMING_SNAKE_CASE ) == 0:
            # Read the keystroke
            lowercase_ : Optional[Any] = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                lowercase_ : Any = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    lowercase_ : Tuple = chr(WIN_KEYMAP[cha] )
                    WIN_CH_BUFFER.append(chr(KEYMAP['''mod_int'''] ) )
                    WIN_CH_BUFFER.append(__SCREAMING_SNAKE_CASE )
                    if ord(__SCREAMING_SNAKE_CASE ) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126 ) )
                    lowercase_ : Dict = chr(KEYMAP['''esc'''] )
                except KeyError:
                    lowercase_ : Dict = cha[1]
            else:
                lowercase_ : List[Any] = ch.decode(__SCREAMING_SNAKE_CASE )
        else:
            # Serve a previously buffered translated keystroke.
            lowercase_ : Any = WIN_CH_BUFFER.pop(0 )
    elif os.name == "posix":
        import termios
        import tty

        lowercase_ : Dict = sys.stdin.fileno()
        lowercase_ : List[Any] = termios.tcgetattr(__SCREAMING_SNAKE_CASE )
        try:
            # Raw mode: deliver each keypress immediately, no echo.
            tty.setraw(__SCREAMING_SNAKE_CASE )
            lowercase_ : Tuple = sys.stdin.read(1 )
        finally:
            # Always restore the terminal settings.
            termios.tcsetattr(__SCREAMING_SNAKE_CASE , termios.TCSADRAIN , __SCREAMING_SNAKE_CASE )
    return ch
def snake_case_ ( ):
    """Read a keypress and normalize it: printable chars pass through, ESC-[
    sequences collapse to arrow-key codes, everything else maps to
    KEYMAP["undefined"].

    NOTE(review): relies on KEYMAP["arrow_begin"] / KEYMAP["arrow_end"], which
    the mangled constants block above never defines — this raises KeyError as
    written. Also, ``get_raw_chars`` is presumably the function directly above
    (whose name survived differently); confirm before use.
    """
    lowercase_ : Optional[int] = get_raw_chars()
    if ord(__SCREAMING_SNAKE_CASE ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(__SCREAMING_SNAKE_CASE ) == KEYMAP["esc"]:
        lowercase_ : str = get_raw_chars()
        if ord(__SCREAMING_SNAKE_CASE ) == KEYMAP["mod_int"]:
            # ESC [ <code>: translate the third byte into a flagged arrow key.
            lowercase_ : Dict = get_raw_chars()
            if ord(__SCREAMING_SNAKE_CASE ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(__SCREAMING_SNAKE_CASE ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(__SCREAMING_SNAKE_CASE ) + ARROW_KEY_FLAG )
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 93
|
def A(bit_count):
    """Return the Gray code sequence for *bit_count* bits as integers.

    :raises ValueError: if *bit_count* is negative.

    A private recursive helper builds the bit strings (the original called an
    undefined ``gray_code_sequence_string`` sibling), which are then parsed
    base 2.
    """
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    def _strings(bits):
        # Reflect-and-prefix construction of the n-bit Gray code strings.
        if bits == 0:
            return ["0"]
        if bits == 1:
            return ["0", "1"]
        half = _strings(bits - 1)
        return ["0" + s for s in half] + ["1" + s for s in reversed(half)]

    return [int(code, 2) for code in _strings(bit_count)]
def A(bit_count):
    """Return the *bit_count*-bit Gray code sequence as binary strings.

    Recursively builds the (n-1)-bit sequence, then prefixes "0" over the first
    half and "1" over the reversed second half. The recursive call uses this
    function's own name (the original referenced an undefined
    ``gray_code_sequence_string``).
    """
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # 1 << n is equivalent to 2**n
    smaller_sequence = A(bit_count - 1)
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 36
| 0
|
import heapq
def __lowerCamelCase ( UpperCAmelCase_ : dict ):
"""simple docstring"""
a :list[list] = []
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(UpperCAmelCase_ , [-1 * len(UpperCAmelCase_ ), (key, value)] )
# chosen_vertices = set of chosen vertices
a :Optional[Any] = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
a :Tuple = heapq.heappop(UpperCAmelCase_ )[1][0]
chosen_vertices.add(UpperCAmelCase_ )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
a :Optional[Any] = elem[1][1].index(UpperCAmelCase_ )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(UpperCAmelCase_ )
return chosen_vertices
if __name__ == "__main__":
    import doctest

    doctest.testmod()
# Demo graph for the greedy vertex-cover routine above.
# NOTE(review): the print references `greedy_min_vertex_cover` and `graph`,
# but the function above was renamed and this dict's target was mangled —
# as written both names are undefined.
snake_case : Optional[Any] = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 94
|
from PIL import Image
def A(image):
    """Binarize a grayscale PIL-style image around its mean pixel value, in place.

    Pixels strictly above the mean become 255, the rest 0.

    :param image: object exposing ``size`` (height, width order as used here)
        and ``load()`` returning an indexable pixel-access map.
    :return: the same image object, mutated.
    """
    height, width = image.size
    mean = 0
    pixels = image.load()
    # First pass: accumulate the mean intensity.
    for i in range(height):
        for j in range(width):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height
    # Second pass: threshold every pixel against the mean.
    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
    # Demo: threshold an image from disk and save the result.
    # NOTE(review): `mean_threshold` is undefined as written (the function above
    # was renamed to `A`), and the literal paths are placeholders.
    _snake_case = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
| 36
| 0
|
def _A ( SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
a__ : Optional[Any] =[0] * len(SCREAMING_SNAKE_CASE )
for i in range(1 , len(SCREAMING_SNAKE_CASE ) ):
# use last results for better performance - dynamic programming
a__ : Tuple =prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
a__ : str =prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
a__ : str =j
return prefix_result
def _A ( SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
return max(prefix_function(SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 95
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class UpperCAmelCase_(PretrainedConfig):
    """Configuration for Wav2Vec2 models.

    NOTE(review): the obfuscated original repeated ``__a`` for every one of the
    ~55 ``__init__`` parameters (a SyntaxError) and used an undefined base name
    ``a``. The names below are restored from the body's attribute assignments,
    whose order and defaults match the upstream ``Wav2Vec2Config``; the base is
    ``PretrainedConfig`` (imported above).
    """

    lowerCamelCase__ = 'wav2vec2'

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        # Transformer backbone.
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # The three conv-layer descriptions must be parallel lists.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def snake_case__(self):
        """Product of all conv strides: total downsampling of the feature encoder."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 36
| 0
|
"""simple docstring"""
def _snake_case ( lowercase__ , lowercase__ ):
# "extended trapezoidal rule"
# int(f) = dx/2 * (f1 + 2f2 + ... + fn)
_lowerCamelCase : List[Any] = (boundary[1] - boundary[0]) / steps
_lowerCamelCase : Tuple = boundary[0]
_lowerCamelCase : Dict = boundary[1]
_lowerCamelCase : List[Any] = make_points(lowercase__ , lowercase__ , lowercase__ )
_lowerCamelCase : List[Any] = 0.0
y += (h / 2.0) * f(lowercase__ )
for i in x_i:
# print(i)
y += h * f(lowercase__ )
y += (h / 2.0) * f(lowercase__ )
return y
def _snake_case ( lowercase__ , lowercase__ , lowercase__ ):
_lowerCamelCase : str = a + h
while x < (b - h):
yield x
_lowerCamelCase : int = x + h
def _snake_case ( lowercase__ ): # enter your function here
_lowerCamelCase : Optional[Any] = (x - 0) * (x - 0)
return y
def _snake_case ( ):
    """Demo driver: integrate x**2 over [0, 1] with 10 trapezoid steps.

    NOTE(review): every assignment target below was mangled into
    ``_lowerCamelCase`` (originally ``a``/``b``/``steps``/``boundary``/``y``),
    and ``method_a(lowercase__, lowercase__)`` references undefined names —
    compare with the upstream script before running.
    """
    _lowerCamelCase : int = 0.0  # Lower bound of integration
    _lowerCamelCase : Optional[int] = 1.0  # Upper bound of integration
    _lowerCamelCase : List[str] = 1_0.0  # define number of steps or resolution
    _lowerCamelCase : List[Any] = [a, b]  # define boundary of integration
    _lowerCamelCase : Optional[Any] = method_a(lowercase__ , lowercase__ )
    print(f'''y = {y}''' )


if __name__ == "__main__":
    main()
| 96
|
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    """DeeBERT backbone configured for RoBERTa: same highway-exit transformer,
    but with RoBERTa's config and embedding layer."""

    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        # Replace the BERT embeddings built by DeeBertModel with RoBERTa ones.
        self.embeddings = RobertaEmbeddings(config)
        # Re-run weight init so the freshly created embeddings are initialized.
        self.init_weights()
@add_start_docstrings(
    "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    """DeeRoBERTa with a sequence-classification head plus per-layer "highway"
    exits, each of which can produce logits and (at eval time) an entropy."""

    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        """Run the backbone (possibly exiting early via ``HighwayException``) and
        compute the classification loss for the final head and each highway exit.

        Returns ``(loss,) + (logits, ...)`` when ``labels`` is given; at eval
        time the tuple is extended with ``((original_entropy, highway_entropy),
        exit_layer)``.
        """
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # A highway layer decided to exit early: its payload replaces the
            # full model outputs and records which layer exited.
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []

        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                # exclude the final highway, of course
                outputs = (sum(highway_losses[:-1]),) + outputs
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
| 36
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# File name of the SentencePiece model inside a saved tokenizer directory.
VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
        "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
        "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/speecht5_asr": 1024,
    "microsoft/speecht5_tts": 1024,
    "microsoft/speecht5_vc": 1024,
}
class SpeechT5Tokenizer(PreTrainedTokenizer):
    """Character-level SpeechT5 tokenizer backed by a SentencePiece model."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        """Load the SentencePiece model from ``vocab_file`` and register the
        special tokens with the base tokenizer."""
        # Must be set before super().__init__, which may tokenize special tokens.
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload in
        # __setstate__ from self.vocab_file.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Append the EOS token to a sequence (or a concatenated pair)."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0, token_ids_1=None, already_has_special_tokens: bool = False
    ):
        """Return a mask with 1 marking special tokens (only the trailing EOS)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the SentencePiece model into ``save_directory`` and return its path."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # No file to copy (e.g. loaded from serialized proto): dump the model bytes.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 97
|
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)  # module logger, used by the config classes below
class VisionEncoderDecoderConfig(PretrainedConfig):
    """Composite configuration for a vision encoder + text decoder model.

    Holds an ``encoder`` and a ``decoder`` sub-configuration, instantiated via
    ``AutoConfig`` from the dicts passed as keyword arguments.
    """

    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuraton of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs):
        """Build a composite config from two existing sub-configs, marking the
        decoder as a cross-attending decoder."""
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, recursing into the sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    """ONNX export config for the vision encoder half of the model."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # NCHW image input with all four axes dynamic.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating exported vs. eager outputs.
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})
class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    """ONNX export config for the text decoder half of the model."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer,
        batch_size=-1,
        seq_length=-1,
        is_pair=False,
        framework=None,
    ) -> Mapping[str, Any]:
        """Build dummy decoder inputs, replacing `input_ids` shape info with a
        zero tensor standing in for the encoder hidden states."""
        import torch

        common_inputs = OrderedDict()

        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs
class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    """Top-level ONNX config that dispatches to the encoder/decoder sub-configs."""

    @property
    def inputs(self):
        # The composite model is exported in two parts; no direct inputs here.
        pass

    def get_encoder_config(self, encoder_config) -> OnnxConfig:
        """Return the ONNX encoder config for a `VisionEncoderDecoder` model."""
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(self, encoder_config, decoder_config, feature: str = "default") -> OnnxConfig:
        """Return the ONNX decoder config for a `VisionEncoderDecoder` model.

        The decoder export needs the encoder's hidden size to shape the dummy
        `encoder_hidden_states` tensor, so it is copied onto the decoder config.
        """
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
| 36
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    """
    Output of AutoencoderKL encoding method.

    Args:
        latent_dist (`DiagonalGaussianDistribution`):
            Encoded outputs of `Encoder` represented as the mean and logvar of
            `DiagonalGaussianDistribution`.
    """

    latent_dist: "DiagonalGaussianDistribution"
class AutoencoderKL(ModelMixin, ConfigMixin):
    """Variational autoencoder with a KL-regularized latent space.

    Supports sliced encoding/decoding (process the batch one sample at a time)
    and tiled encoding/decoding (split large images into overlapping tiles and
    blend the seams).
    """

    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
    ):
        super().__init__()

        # pass init params to Encoder (double_z=True: the encoder emits mean and logvar)
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
        )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
        )

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        # Latent-space tile size: pixel tile size divided by the total downsampling factor.
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        """Enable (or set) tiled VAE encoding/decoding for large images."""
        self.use_tiling = use_tiling

    def disable_tiling(self):
        """Disable tiled VAE encoding/decoding."""
        self.enable_tiling(False)

    def enable_slicing(self):
        """Decode/encode the batch one sample at a time to save memory."""
        self.use_slicing = True

    def disable_slicing(self):
        """Process the whole batch in one step again."""
        self.use_slicing = False

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        """Return all attention processors in the model, keyed by module path."""
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        """Set the attention processor(s); a dict must cover every attention layer."""
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        """Encode an image batch into a diagonal-Gaussian latent distribution."""
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        """Decode latents to images, optionally one batch element at a time."""
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)

    def blend_v(self, a, b, blend_extent):
        """Vertically cross-fade the bottom of tile `a` into the top of tile `b`."""
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        """Horizontally cross-fade the right edge of tile `a` into the left edge of `b`."""
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b

    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        """Encode a large image tile-by-tile and blend the overlapping seams."""
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        """Decode a large latent tile-by-tile and blend the overlapping seams."""
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(
        self,
        sample: torch.FloatTensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        """Encode then decode `sample`; optionally sample from the posterior."""
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
| 98
|
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """
    Descriptor that mimics @property but caches output in member variable.

    From tensorflow_datasets

    Built-in in functools from Python 3.8.
    """

    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def strtobool(val):
    """Convert a string representation of truth to 1 (true) or 0 (false).

    True values are "y", "yes", "t", "true", "on", and "1"; false values are
    "n", "no", "f", "false", "off", and "0".

    Raises:
        ValueError: if `val` is anything else.
    """
    # Comparison is case-insensitive: normalize before the membership tests.
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
def A ( _lowerCamelCase ):
'''simple docstring'''
if is_torch_fx_proxy(_lowerCamelCase ):
return True
if is_torch_available():
import torch
if isinstance(_lowerCamelCase , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(_lowerCamelCase , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(_lowerCamelCase , (jnp.ndarray, Tracer) ):
return True
return isinstance(_lowerCamelCase , np.ndarray )
def A ( _lowerCamelCase ):
'''simple docstring'''
return isinstance(_lowerCamelCase , np.ndarray )
def A ( _lowerCamelCase ):
'''simple docstring'''
return _is_numpy(_lowerCamelCase )
def A ( _lowerCamelCase ):
'''simple docstring'''
import torch
return isinstance(_lowerCamelCase , torch.Tensor )
def A ( _lowerCamelCase ):
'''simple docstring'''
return False if not is_torch_available() else _is_torch(_lowerCamelCase )
def A ( _lowerCamelCase ):
'''simple docstring'''
import torch
return isinstance(_lowerCamelCase , torch.device )
def A ( _lowerCamelCase ):
'''simple docstring'''
return False if not is_torch_available() else _is_torch_device(_lowerCamelCase )
def A ( _lowerCamelCase ):
'''simple docstring'''
import torch
if isinstance(_lowerCamelCase , _lowerCamelCase ):
if hasattr(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = getattr(_lowerCamelCase , _lowerCamelCase )
else:
return False
return isinstance(_lowerCamelCase , torch.dtype )
def A ( _lowerCamelCase ):
'''simple docstring'''
return False if not is_torch_available() else _is_torch_dtype(_lowerCamelCase )
def A ( _lowerCamelCase ):
'''simple docstring'''
import tensorflow as tf
return isinstance(_lowerCamelCase , tf.Tensor )
def A ( _lowerCamelCase ):
'''simple docstring'''
return False if not is_tf_available() else _is_tensorflow(_lowerCamelCase )
def A ( _lowerCamelCase ):
'''simple docstring'''
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(_lowerCamelCase , "is_symbolic_tensor" ):
return tf.is_symbolic_tensor(_lowerCamelCase )
return type(_lowerCamelCase ) == tf.Tensor
def A ( _lowerCamelCase ):
'''simple docstring'''
return False if not is_tf_available() else _is_tf_symbolic_tensor(_lowerCamelCase )
def A ( _lowerCamelCase ):
'''simple docstring'''
import jax.numpy as jnp # noqa: F811
return isinstance(_lowerCamelCase , jnp.ndarray )
def A ( _lowerCamelCase ):
'''simple docstring'''
return False if not is_flax_available() else _is_jax(_lowerCamelCase )
def to_py_obj(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list
    to a plain python object, recursing into dicts, lists and tuples."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list
    to a Numpy array (dict values are converted individually)."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
class ModelOutput(OrderedDict):
    """
    Base class for all model outputs as dataclass. Has a `__getitem__` that
    allows indexing by integer (like a tuple) or string (like a dictionary)
    while ignoring `None` attributes. Keys may not be deleted or mutated via
    the usual dict mutators.
    """

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            # Regular case: register every non-None dataclass field as a dict entry.
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        # Will raise a KeyError if needed
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self):
        """Convert self to a tuple containing all the attributes/keys that are not `None`."""
        return tuple(self[k] for k in self.keys())
class ExplicitEnum(str, Enum):
    """Enum with a more explicit error message for missing values."""

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )
class PaddingStrategy(ExplicitEnum):
    """Possible values for the `padding` argument of tokenizer calls.
    Useful for tab-completion in an IDE."""

    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    """Possible values for the `return_tensors` argument of tokenizer calls.
    Useful for tab-completion in an IDE."""

    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"
class ContextManagers:
    """Wrapper around `contextlib.ExitStack` that enters a whole collection of
    context managers on `__enter__` and unwinds them all on `__exit__`."""

    def __init__(self, context_managers: List[ContextManager[Any]]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : str = infer_framework(_lowerCamelCase )
if framework == "tf":
_lowerCAmelCase : Tuple = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
_lowerCAmelCase : str = inspect.signature(model_class.forward ) # PyTorch models
else:
_lowerCAmelCase : Tuple = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def A ( _lowerCamelCase ):
    """List the label-like parameter names of a model class's forward/call signature.

    Restores the locals ``model_name``/``framework``/``signature`` and the use
    of the actual parameter; the original read them as undefined names.
    """
    model_name = _lowerCamelCase.__name__
    framework = infer_framework(_lowerCamelCase )
    if framework == "tf":
        signature = inspect.signature(_lowerCamelCase.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(_lowerCamelCase.forward )  # PyTorch models
    else:
        signature = inspect.signature(_lowerCamelCase.__call__ )  # Flax models
    # QA models use span positions rather than a single `labels` argument.
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def A ( d , parent_key = "" , delimiter = "." ):
    """Flatten a nested mapping into a single-level dict with delimiter-joined keys.

    The original declared the same parameter name three times (a SyntaxError),
    never bound ``key``, and recursed through an undefined name; this restores
    the standard implementation.
    """
    from collections.abc import MutableMapping

    def _flatten_dict(d , parent_key="" , delimiter="." ):
        for k, v in d.items():
            key = str(parent_key ) + delimiter + str(k ) if parent_key else k
            if v and isinstance(v , MutableMapping ):
                yield from _flatten_dict(v , key , delimiter=delimiter )
            else:
                yield key, v

    return dict(_flatten_dict(d , parent_key , delimiter ) )
@contextmanager
def A ( working_dir , use_temp_dir = False ):
    """Yield ``working_dir`` as-is, or a fresh temporary directory when
    ``use_temp_dir`` is True (removed on exit).

    Restores distinct parameter names — the original duplicated
    ``_lowerCamelCase`` in the signature (a SyntaxError) while the body already
    referenced ``use_temp_dir``/``working_dir``.
    """
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def A ( array , axes=None ):
    """Framework-agnostic transpose for numpy/torch/tf/jax arrays.

    The original declared ``_lowerCamelCase`` twice in the signature (a
    SyntaxError) while the body already referenced ``array`` and ``axes``;
    the signature is restored to match the body.
    """
    if is_numpy_array(array ):
        return np.transpose(array , axes=axes )
    elif is_torch_tensor(array ):
        return array.T if axes is None else array.permute(*axes )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.transpose(array , perm=axes )
    elif is_jax_tensor(array ):
        return jnp.transpose(array , axes=axes )
    else:
        raise ValueError(f"Type not supported for transpose: {type(array )}." )
def A ( array , newshape ):
    """Framework-agnostic reshape for numpy/torch/tf/jax arrays.

    Restores distinct parameter names (the original duplicated
    ``_lowerCamelCase``, a SyntaxError).
    """
    if is_numpy_array(array ):
        return np.reshape(array , newshape )
    elif is_torch_tensor(array ):
        return array.reshape(*newshape )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.reshape(array , newshape )
    elif is_jax_tensor(array ):
        return jnp.reshape(array , newshape )
    else:
        raise ValueError(f"Type not supported for reshape: {type(array )}." )
def A ( array , axis=None ):
    """Framework-agnostic squeeze for numpy/torch/tf/jax arrays.

    Restores distinct parameter names (the original duplicated
    ``_lowerCamelCase``, a SyntaxError) to match the ``array``/``axis`` names
    the body already used.
    """
    if is_numpy_array(array ):
        return np.squeeze(array , axis=axis )
    elif is_torch_tensor(array ):
        return array.squeeze() if axis is None else array.squeeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.squeeze(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.squeeze(array , axis=axis )
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array )}." )
def A ( array , axis ):
    """Framework-agnostic expand_dims for numpy/torch/tf/jax arrays.

    Restores distinct parameter names (the original duplicated
    ``_lowerCamelCase``, a SyntaxError).
    """
    if is_numpy_array(array ):
        return np.expand_dims(array , axis )
    elif is_torch_tensor(array ):
        return array.unsqueeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.expand_dims(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.expand_dims(array , axis=axis )
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array )}." )
def A ( array ):
    """Number of elements in a numpy/torch/tf/jax array.

    Renames the parameter back to ``array`` (the body referenced it while the
    signature declared ``_lowerCamelCase``) and fixes the copy-pasted error
    message, which mentioned ``expand_dims``.
    """
    if is_numpy_array(array ):
        return np.size(array )
    elif is_torch_tensor(array ):
        return array.numel()
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.size(array )
    elif is_jax_tensor(array ):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array )}." )
def A ( auto_map , repo_id ):
    """Prefix every bare class reference in ``auto_map`` with ``repo_id--`` so
    dynamic-module lookups resolve against the right repository.

    The original duplicated the parameter name (a SyntaxError) and assigned
    the rewritten values to a throwaway local instead of writing them back
    into the mapping; both are restored.
    """
    for key, value in auto_map.items():
        if isinstance(value , (tuple, list) ):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"
    return auto_map
def A ( _lowerCamelCase ):
    """Infer "tf", "pt" or "flax" from a model class's MRO module names.

    Restores the locals ``module`` and ``name``: the original assigned them to
    throwaway names and then read the undefined originals.
    """
    for base_class in inspect.getmro(_lowerCamelCase ):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow" ) or module.startswith("keras" ) or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch" ) or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax" ) or module.startswith("jax" ) or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        # for/else: no base class matched any known framework.
        raise TypeError(f"Could not infer framework from class {_lowerCamelCase}." )
| 36
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazily-imported public API of the Electra package.
# Fixes the obfuscated version, which clobbered one `lowercase` variable for
# every optional-backend list and then passed the undefined `_import_structure`
# to `_LazyModule` (and never installed the lazy module in `sys.modules`).
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 99
|
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected" , [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10 )]),
        # Fixes the original case, which used the undefined name `_lowerCamelCase`
        # as the range start instead of the comprehension variable `i`.
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i , i + 1 ) for i in range(10 )]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1 )]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
    ] , )
def A ( kwargs , expected ):
    """_distribute_shards must split `num_shards` into at most `max_num_jobs` contiguous ranges."""
    out = _distribute_shards(**kwargs )
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected" , [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ] , )
def A ( gen_kwargs , max_num_jobs , expected ):
    """_split_gen_kwargs must distribute shard lists over at most `max_num_jobs` jobs.

    Restores distinct parameter names (the original declared `_lowerCamelCase`
    three times, a SyntaxError) and the `out`/`expected` locals the body read.
    """
    out = _split_gen_kwargs(gen_kwargs , max_num_jobs )
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, expected" , [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ] , )
def A ( gen_kwargs , expected ):
    """_number_of_shards_in_gen_kwargs returns the shard count, or raises when
    two shard lists disagree in length.

    Restores distinct parameter names (the original duplicated
    `_lowerCamelCase`, a SyntaxError) and the undefined `expected`/`out` reads.
    """
    if expected is RuntimeError:
        with pytest.raises(expected ):
            _number_of_shards_in_gen_kwargs(gen_kwargs )
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs )
        assert out == expected
| 36
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ = {
"configuration_informer": [
"INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
"INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"InformerForPrediction",
"InformerModel",
"InformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 100
|
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class UpperCAmelCase_ :
    """CLIP preprocessing (tokenize + resize/crop/normalize) that keeps gradients
    flowing to the image tensor.

    Fixes the obfuscated original, which (a) declared the duplicate parameter
    name ``__a`` for both __init__ arguments and both __call__ keyword
    arguments (SyntaxErrors), (b) assigned every attribute to a throwaway
    local instead of ``self``, (c) discarded the preprocessed pixel values
    instead of storing them in the returned encoding, and (d) called
    ``self.preprocess_img``, which no longer existed after the method was
    renamed to ``snake_case__``.
    """

    def __init__( self, device = "cpu", clip_model = "openai/clip-vit-large-patch14"):
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        # CLIP's published per-channel image statistics.
        self.image_mean = [0.48_145_466, 0.4_578_275, 0.40_821_073]
        self.image_std = [0.26_862_954, 0.26_130_258, 0.27_577_711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def snake_case__ ( self, __a):
        """Resize, center-crop and normalize a batch of images (differentiable)."""
        images = self.resize(__a)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__( self, text=None, images=None, **kwargs):
        """Tokenize ``text``, preprocess ``images``, and move everything to ``self.device``."""
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.snake_case__(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class UpperCAmelCase_ ( nn.Module):
    """VQGAN+CLIP editor: optimizes an additive offset in VQGAN latent space so
    the decoded image matches positive text prompts and avoids negative ones.

    NOTE(review): the renaming pass broke this class in several places —
    __init__ and generate duplicate the parameter name `__a` (SyntaxErrors),
    and many results are assigned to the throwaway `_lowerCAmelCase` and then
    read under their original (now undefined) names. Each spot is flagged
    below rather than guessed at.
    """
    def __init__( self, __a=10, __a=0.01, __a=None, __a=None, __a=None, __a=None, __a=None, __a=None, __a=False, __a=True, __a="image", __a=True, __a=False, __a=False, __a=False, ):
        # NOTE(review): all parameters are named `__a` (SyntaxError); the body
        # reads names like `device`, `vqgan`, `clip`, `iterations`, `lr`, `log`,
        # `make_grid`, `return_val`, `quantize` — presumably the original names.
        '''Load (or accept) the VQGAN and CLIP models and store optimization settings.'''
        super().__init__()
        _lowerCAmelCase : List[str] = None
        _lowerCAmelCase : List[str] = device if device else get_device()
        if vqgan:
            _lowerCAmelCase : Union[str, Any] = vqgan
        else:
            _lowerCAmelCase : Optional[Any] = load_vqgan(self.device, conf_path=__a, ckpt_path=__a)
        self.vqgan.eval()
        if clip:
            _lowerCAmelCase : str = clip
        else:
            _lowerCAmelCase : int = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        _lowerCAmelCase : Optional[int] = ProcessorGradientFlow(device=self.device)
        _lowerCAmelCase : Any = iterations
        _lowerCAmelCase : List[Any] = lr
        _lowerCAmelCase : Tuple = log
        _lowerCAmelCase : List[str] = make_grid
        _lowerCAmelCase : int = return_val
        _lowerCAmelCase : Dict = quantize
        _lowerCAmelCase : Any = self.vqgan.decoder.z_shape
    def snake_case__ ( self, __a=None, __a=None, __a=5, __a=True):
        '''Assemble the PNG frames saved during generation into an animated GIF.'''
        # NOTE(review): duplicated `__a` parameters (SyntaxError); the body reads
        # `output_path`, `input_path`, `total_duration`, `extend_frames`.
        _lowerCAmelCase : Union[str, Any] = []
        if output_path is None:
            _lowerCAmelCase : List[Any] = "./animation.gif"
        if input_path is None:
            _lowerCAmelCase : str = self.save_path
        _lowerCAmelCase : str = sorted(glob(input_path + "/*"))
        if not len(__a):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)")
        if len(__a) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        _lowerCAmelCase : Optional[int] = total_duration / len(__a)
        _lowerCAmelCase : Union[str, Any] = [frame_duration] * len(__a)
        if extend_frames:
            # Hold the first frame longer and the last frame longest.
            _lowerCAmelCase : Any = 1.5
            _lowerCAmelCase : List[str] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(__a))
        imageio.mimsave(__a, __a, duration=__a)
        print(f"gif saved to {output_path}")
    def snake_case__ ( self, __a=None, __a=None):
        '''Encode an image from disk into VQGAN latent space.'''
        # NOTE(review): duplicated `__a` parameters (SyntaxError); body reads `path`/`img`.
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        _lowerCAmelCase : Dict = preprocess(Image.open(__a), target_image_size=256).to(self.device)
        _lowerCAmelCase : Dict = preprocess_vqgan(__a)
        _lowerCAmelCase , *_lowerCAmelCase : str = self.vqgan.encode(__a)
        # NOTE(review): `z` below is undefined — the encode result was assigned to
        # a throwaway local; presumably the latent code should be returned.
        return z
    def snake_case__ ( self, __a):
        '''Decode the current latent plus a transform vector back to an image.'''
        _lowerCAmelCase : Optional[Any] = self.latent.detach().requires_grad_()
        # NOTE(review): `base_latent` / `transform_vector` are undefined here —
        # assignments above went to throwaway locals.
        _lowerCAmelCase : Dict = base_latent + transform_vector
        if self.quantize:
            _lowerCAmelCase , *_lowerCAmelCase : List[Any] = self.vqgan.quantize(__a)
        else:
            _lowerCAmelCase : Any = trans_latent
        return self.vqgan.decode(__a)
    def snake_case__ ( self, __a, __a, __a=None):
        '''Score CLIP text-image similarity, optionally weighting each prompt.'''
        _lowerCAmelCase : int = self.clip_preprocessor(text=__a, images=__a, return_tensors="pt", padding=__a)
        _lowerCAmelCase : Optional[int] = self.clip(**__a)
        _lowerCAmelCase : Any = clip_outputs.logits_per_image
        if weights is not None:
            _lowerCAmelCase : Tuple = similarity_logits * weights
        return similarity_logits.sum()
    def snake_case__ ( self, __a, __a, __a):
        '''Combine positive and negative prompt similarities into a single loss.'''
        _lowerCAmelCase : List[Any] = self._get_clip_similarity(pos_prompts["prompts"], __a, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            _lowerCAmelCase : List[Any] = self._get_clip_similarity(neg_prompts["prompts"], __a, weights=neg_prompts["weights"])
        else:
            _lowerCAmelCase : Union[str, Any] = torch.tensor([1], device=self.device)
        # NOTE(review): `_get_clip_similarity` is not defined under that name in
        # this renamed class — confirm the intended helper.
        _lowerCAmelCase : List[str] = -torch.log(__a) + torch.log(__a)
        return loss
    def snake_case__ ( self, __a, __a, __a):
        '''Gradient-descend a latent offset vector for `self.iterations` steps,
        yielding an intermediate image (or the vector) each step.'''
        _lowerCAmelCase : Optional[Any] = torch.randn_like(self.latent, requires_grad=__a, device=self.device)
        _lowerCAmelCase : Optional[int] = torch.optim.Adam([vector], lr=self.lr)
        for i in range(self.iterations):
            optim.zero_grad()
            _lowerCAmelCase : Any = self._add_vector(__a)
            _lowerCAmelCase : Optional[Any] = loop_post_process(__a)
            _lowerCAmelCase : Optional[Any] = self._get_CLIP_loss(__a, __a, __a)
            print("CLIP loss", __a)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=__a)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector
    def snake_case__ ( self, __a, __a, __a):
        '''Initialize a wandb run and record prompts, hyperparameters and the input image.'''
        wandb.init(reinit=__a, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            _lowerCAmelCase : str = Image.open(__a)
            _lowerCAmelCase : int = image.resize((256, 256))
            # NOTE(review): `wandb.log` expects a dict — this positional call
            # looks broken; presumably {"Original Image": wandb.Image(...)}.
            wandb.log("Original Image", wandb.Image(__a))
    def snake_case__ ( self, __a):
        '''Parse prompts given as "text:weight" strings, (text, weight) pairs or a
        "|"-separated string into {"prompts": [...], "weights": tensor}.'''
        if not prompts:
            return []
        _lowerCAmelCase : int = []
        _lowerCAmelCase : List[str] = []
        if isinstance(__a, __a):
            _lowerCAmelCase : Union[str, Any] = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(__a, (tuple, list)):
                _lowerCAmelCase : Optional[Any] = prompt[0]
                _lowerCAmelCase : Union[str, Any] = float(prompt[1])
            elif ":" in prompt:
                _lowerCAmelCase , _lowerCAmelCase : int = prompt.split(":")
                _lowerCAmelCase : Optional[Any] = float(__a)
            else:
                # No explicit weight: default to 1.0.
                _lowerCAmelCase : Optional[int] = prompt
                _lowerCAmelCase : List[Any] = 1.0
            processed_prompts.append(__a)
            weights.append(__a)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(__a, device=self.device),
        }
    def snake_case__ ( self, __a, __a=None, __a=None, __a=True, __a=False, __a=True, __a=True, __a=None, ):
        '''End-to-end generation: encode (or sample) a latent, then iterate the
        CLIP-guided optimization, optionally displaying/saving each step.'''
        # NOTE(review): duplicated `__a` parameters (SyntaxError); body reads
        # `image_path`, `pos_prompts`, `neg_prompts`, `show_intermediate`,
        # `save_intermediate`, `show_final`, `save_final`, `save_path`.
        if image_path:
            _lowerCAmelCase : List[Any] = self._get_latent(__a)
        else:
            _lowerCAmelCase : Any = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(__a, __a, __a)
        assert pos_prompts, "You must provide at least one positive prompt."
        _lowerCAmelCase : int = self.process_prompts(__a)
        _lowerCAmelCase : List[str] = self.process_prompts(__a)
        if save_final and save_path is None:
            _lowerCAmelCase : int = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(__a):
                os.makedirs(__a)
            else:
                # Avoid clobbering a previous run with the same prompt string.
                _lowerCAmelCase : Tuple = save_path + "_" + get_timestamp()
                os.makedirs(__a)
            _lowerCAmelCase : Tuple = save_path
        _lowerCAmelCase : List[Any] = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(__a))
        _lowerCAmelCase : int = loop_post_process(__a)
        for iter, transformed_img in enumerate(self._optimize_CLIP(__a, __a, __a)):
            if show_intermediate:
                show_pil(__a)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(__a)})
        if show_final:
            show_pil(__a)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
| 36
| 0
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


# Lazily-imported public API of the MLuke package.
# Fixes the obfuscated version, which stored the structure under `lowercase__`
# and then passed the undefined `_import_structure` to `_LazyModule` without
# installing the lazy module in `sys.modules`.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 101
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
# Make the shared test utilities importable so the dynamic image-processor
# fixture module resolves at import time.
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
# Directory containing the sample image-processor config used by the tests below.
_snake_case = get_tests_dir("fixtures")
class UpperCAmelCase_ ( unittest.TestCase):
    """Robustness tests for image-processor loading from the Hub.

    NOTE(review): every test method in this class shares the name
    `snake_case__`, so only the last definition survives — the original test
    method names were lost in the renaming pass.
    """
    def snake_case__ ( self):
        '''A cached processor must still load when the Hub returns HTTP 500.'''
        _lowerCAmelCase : Optional[Any] = mock.Mock()
        _lowerCAmelCase : int = 500
        _lowerCAmelCase : Tuple = {}
        _lowerCAmelCase : str = HTTPError
        _lowerCAmelCase : Union[str, Any] = {}
        # Download this model to make sure it's in the cache.
        _lowerCAmelCase : Tuple = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
        # Under the mock environment we get a 500 error when trying to reach the model.
        # NOTE(review): `__a` below is undefined in this method — the mocked
        # response object built above was assigned to a throwaway local instead.
        with mock.patch("requests.Session.request", return_value=__a) as mock_head:
            _lowerCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()
    def snake_case__ ( self):
        '''Loading directly from a config-file URL must succeed.'''
        _lowerCAmelCase : Tuple = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json")
    def snake_case__ ( self):
        '''Configs stored in a repo subfolder require the `subfolder` argument.'''
        # NOTE(review): `__a` is undefined in this method — presumably the
        # expected exception class (and later the loaded processor); confirm.
        with self.assertRaises(__a):
            # config is in subfolder, the following should not work without specifying the subfolder
            _lowerCAmelCase : int = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")
        _lowerCAmelCase : Optional[Any] = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor")
        self.assertIsNotNone(__a)
@is_staging_test
class UpperCAmelCase_ ( unittest.TestCase):
    """Staging-Hub round-trip tests: push image processors (user repo, org repo,
    and a dynamic custom class) and reload them.

    NOTE(review): all test methods share the name `snake_case__`, so only the
    last definition survives — original method names were lost in renaming.
    Several methods also call `from_pretrained(__a)` where `__a` is undefined
    (presumably the fixtures directory captured at module level); confirm.
    """
    @classmethod
    def snake_case__ ( cls):
        '''Authenticate against the staging Hub once for the whole class.'''
        _lowerCAmelCase : Union[str, Any] = TOKEN
        HfFolder.save_token(__a)
    @classmethod
    def snake_case__ ( cls):
        '''Best-effort cleanup of the repos created by these tests.'''
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass
    def snake_case__ ( self):
        '''Push to a user repo via push_to_hub and via save_pretrained; reload and compare.'''
        # NOTE(review): `__a` is undefined here — presumably the fixtures path.
        _lowerCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained(__a)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)
        _lowerCAmelCase : str = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(__a, getattr(__a, __a))
        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                __a, repo_id="test-image-processor", push_to_hub=__a, use_auth_token=self._token)
        _lowerCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(__a, getattr(__a, __a))
    def snake_case__ ( self):
        '''Same round-trip as above, but against an organization repo.'''
        _lowerCAmelCase : Any = ViTImageProcessor.from_pretrained(__a)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)
        _lowerCAmelCase : Tuple = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(__a, getattr(__a, __a))
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                __a, repo_id="valid_org/test-image-processor-org", push_to_hub=__a, use_auth_token=self._token)
        _lowerCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(__a, getattr(__a, __a))
    def snake_case__ ( self):
        '''A custom image-processor class pushed with register_for_auto_class must
        round-trip through AutoImageProcessor with trust_remote_code.'''
        CustomImageProcessor.register_for_auto_class()
        _lowerCAmelCase : List[str] = CustomImageProcessor.from_pretrained(__a)
        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map, {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"}, )
        _lowerCAmelCase : Tuple = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=__a)
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
| 36
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class _UpperCAmelCase ( unittest.TestCase ):
    """Builds a tiny AlbertConfig plus random inputs for the Flax Albert tests.

    Restores the original implementation: the obfuscated version declared the
    parameter name ``a_`` for every __init__ argument (a SyntaxError), wrote
    all locals to a throwaway ``__snake_case`` name and then read the
    undefined originals, and renamed both helper methods to the same
    identifier so the second shadowed the first (and the internal
    ``self.prepare_config_and_inputs()`` call no longer resolved).
    """

    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self ):
        """Create random ids/masks and the matching tiny AlbertConfig."""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = AlbertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self ):
        """Repackage the inputs as the dict shape the common tester mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
@require_flax
class _UpperCAmelCase ( __snake_case, unittest.TestCase ):
    """Flax Albert model tests driven by the tester class above.

    Fixes the obfuscated version, which (a) assigned the class tuple to
    ``lowerCamelCase__`` although the mixin and the loop below read
    ``all_model_classes``, (b) dropped the ``self.model_tester`` assignment
    in setUp, and (c) read the undefined local ``model``.
    """

    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def SCREAMING_SNAKE_CASE (self ):
        # NOTE(review): `FlaxAlbertModelTester` is not defined in this renamed
        # file (the tester above is `_UpperCAmelCase`, a name this class also
        # shadows) — confirm the intended tester class name.
        self.model_tester = FlaxAlbertModelTester(self )

    @slow
    def SCREAMING_SNAKE_CASE (self ):
        """Smoke-test: every model class loads from the `albert-base-v2` checkpoint and runs."""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''albert-base-v2''' )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
    """Integration test: compare real `albert-base-v2` outputs to reference values.

    Restores the locals (`model`, `input_ids`, `attention_mask`, `output`,
    `expected_shape`, `expected_slice`): the obfuscated version assigned them
    to throwaway names and then read undefined identifiers (including `a_`,
    which was never a parameter of this method).
    """

    @slow
    def SCREAMING_SNAKE_CASE (self ):
        model = FlaxAlbertModel.from_pretrained('''albert-base-v2''' )
        input_ids = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = (1, 11, 7_68)
        self.assertEqual(output.shape , expected_shape )
        # Reference slice of the hidden states for the first few tokens/dims.
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
| 102
|
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    """Builds tiny random LiLT configs/inputs and runs shape checks for the model tests.

    Reconstructed from a name-mangled original: the ``__init__`` signature had
    duplicate parameter names (a SyntaxError), every method shared one mangled
    name, and ``prepare_config_and_inputs_for_common`` annotated a tuple target
    (also a SyntaxError). The class name is grounded by the
    ``LiltModelTester(self)`` call in the test class's ``setUp`` below; parameter
    names are recovered from the attribute assignments in the original body.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        """Create random ids/bbox/mask/label tensors plus a matching config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal: x0 <= x1 and y0 <= y1 for every box.
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        """Return a small LiltConfig mirroring this tester's hyper-parameters."""
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """Run the bare model with progressively fewer optional inputs and check shapes."""
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
        )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """Check the token-classification head's logits shape."""
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """Check the QA head's start/end logits shapes."""
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the format the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Model-level tests for LiLT.

    The mangled original inherited from an undefined name ``a`` and gave all
    four class attributes and all eight methods the same identifier, so unittest
    could discover none of the tests. Bases are restored from this file's
    imports; attribute and ``test_*`` names follow the standard transformers
    test-mixin conventions (class name chosen for distinctness — the original
    was shadowed by the other classes in this module).
    """

    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # The original unconditionally returned True: skip every pipeline test.
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        # config_class was mangled to an undefined name; LiltConfig is the only
        # config imported by this file.
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for embedding_type in ["absolute", "relative_key", "relative_key_query"]:
            # Exercise each supported position-embedding variant.
            config_and_inputs[0].position_embedding_type = embedding_type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    """Slow integration test comparing a pretrained LiLT forward pass to known values.

    Reconstructed from a mangled original that referenced the undefined name
    ``__a`` (restored to ``torch_device``, imported at the top of this file) and
    used ``assertTrue(shape, expected)`` — which always passes because the second
    argument is treated as a message — where ``assertEqual`` was intended.
    """

    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)
        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)
        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]],
            device=torch_device,
        )
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1E-3))
| 36
| 0
|
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def gen_gaussian_kernel(k_size, sigma):
    """Return a ``k_size`` x ``k_size`` Gaussian surface with standard deviation ``sigma``.

    The name matches the ``gen_gaussian_kernel(k_size, sigma)`` call below; the
    mangled original declared duplicate parameter names (a SyntaxError) and then
    read ``k_size``/``sigma`` without defining them. Note the kernel is NOT
    re-normalised to sum to 1 — the original used the same 1/(2*pi*sigma) factor.
    """
    # Offset of the kernel centre, so the grid is symmetric around 0.
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g
def gaussian_filter(image, k_size, sigma):
    """Convolve a 2-D grayscale ``image`` with a Gaussian kernel via im2col.

    The name matches the ``gaussian_filter(...)`` calls in the ``__main__`` block
    below; the mangled original declared duplicate parameter names (a
    SyntaxError) and used undefined locals. Returns a uint8 array of shape
    (H - k_size + 1, W - k_size + 1) — the output shrinks because no padding is
    applied.
    """
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1
    # im2col: turn each k_size*k_size window into one row, np.vstack-style.
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1
    # Flatten the kernel to shape (k*k,) so filtering becomes one matrix product.
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)
    # Cast via the dtype string: numpy has no ``uinta`` (that imported name is a
    # mangling artefact of ``uint8``).
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype("uint8")
    return dst
if __name__ == "__main__":
    # read original image (mangled original assigned to ``A__`` but read ``img``)
    img = imread(R'''../image_data/lena.jpg''')
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)
    # show result images
    imshow('''gaussian filter with 3x3 mask''', gaussian3x3)
    imshow('''gaussian filter with 5x5 mask''', gaussian5x5)
    waitKey()
| 103
|
import argparse
import copy
def generate_neighbours(path):
    """Parse a whitespace-separated edge list into an adjacency dict.

    Each line of the file at ``path`` is ``node_a node_b distance``; the result
    maps every node to a list of ``[neighbour, distance]`` pairs (distances stay
    strings). The name matches the ``generate_neighbours(args.File)`` call in
    ``main`` below; the mangled original appended to an undefined ``_list`` and
    never stored the new entry in the dict.
    """
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            parts = line.split()
            # Record the edge in both directions (the graph is undirected).
            dict_of_neighbours.setdefault(parts[0], []).append([parts[1], parts[2]])
            dict_of_neighbours.setdefault(parts[1], []).append([parts[0], parts[2]])
    return dict_of_neighbours
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
with open(_lowerCamelCase ) as f:
_lowerCAmelCase : str = f.read(1 )
_lowerCAmelCase : str = start_node
_lowerCAmelCase : List[str] = []
_lowerCAmelCase : Any = start_node
_lowerCAmelCase : str = 0
while visiting not in first_solution:
_lowerCAmelCase : Dict = 10_000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(_lowerCamelCase ) and k[0] not in first_solution:
_lowerCAmelCase : List[str] = k[1]
_lowerCAmelCase : List[Any] = k[0]
first_solution.append(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = distance_of_first_solution + int(_lowerCamelCase )
_lowerCAmelCase : str = best_node
first_solution.append(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
_lowerCAmelCase : Tuple = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 10_000
)
return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    """Return every distinct tour reachable by swapping two interior nodes.

    Each candidate tour carries its total distance appended as the last list
    element; the result is sorted ascending by that distance. The name matches
    the call in ``tabu_search`` below; the mangled original's sort lambda named
    its parameter one thing and read another (a NameError).
    """
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue
            # Swap the two interior nodes in a copy of the tour.
            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n
            # Re-price the whole candidate tour edge by edge.
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Run tabu search for ``iters`` iterations with a tabu list of length ``size``.

    Returns ``(best_solution_ever, best_cost)``. The name matches the call in
    ``main`` below; the mangled original used undefined locals throughout.
    """
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        # The candidate's total distance is stored as its last element.
        best_cost_index = len(best_solution) - 1
        found = False
        while not found:
            # Locate the first position where the candidate differs — that pair
            # of nodes is the exchange this move performs.
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]  # drop the appended distance
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                # Move is tabu: fall back to the next-best neighbour.
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list) >= size:
            tabu_list.pop(0)
        count = count + 1
    return best_solution_ever, best_cost
def main(args=None):
    """Command-line entry point: build the graph, seed a greedy tour, run tabu search.

    ``args`` is the parsed argparse namespace (``File``, ``Iterations``, ``Size``).
    """
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )
    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )
    print(F"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
    # The mangled original assigned the parser to ``_snake_case`` but used ``parser``.
    parser = argparse.ArgumentParser(description="Tabu Search")
    parser.add_argument(
        "-f",
        "--File",
        type=str,
        help="Path to the file containing the data",
        required=True,
    )
    parser.add_argument(
        "-i",
        "--Iterations",
        type=int,
        help="How many iterations the algorithm should perform",
        required=True,
    )
    parser.add_argument(
        "-s", "--Size", type=int, help="Size of the tabu list", required=True
    )
    # Pass the arguments to main method
    main(parser.parse_args())
| 36
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCAmelCase__ = logging.get_logger(__name__)
class lowercase_ (BaseImageProcessor):
    """Image processor applying resize -> center-crop -> rescale -> normalize.

    Reconstructed from a name-mangled original in which every parameter of every
    method shared one identifier (a SyntaxError) and all methods shared one
    name. The base class is grounded by this file's ``BaseImageProcessor``
    import; method names are grounded by the ``self.resize`` /
    ``self.center_crop`` / ``self.rescale`` / ``self.normalize`` calls in
    ``preprocess``.
    """

    model_input_names = ['pixel_values']

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        do_normalize: bool = True,
        rescale_factor: Union[int, float] = 1 / 2_5_5,
        crop_size: Dict[str, int] = None,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        # NOTE(review): the parameter order and the mapping of the three mangled
        # booleans to do_rescale/do_normalize/do_center_crop follow the original
        # default pattern but could not be fully recovered — confirm against any
        # caller that passes these positionally.
        super().__init__(**kwargs)
        size = size if size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
        # NOTE(review): the mangled original passed default_to_square a mangled
        # argument; True is the conventional value for a height/width crop dict.
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name='''crop_size''')
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Resize ``image`` to ``size`` — either a full height/width dict or a shortest-edge spec."""
        size = get_size_dict(size)
        if "shortest_edge" in size:
            # NOTE(review): default_to_square=False is the conventional value for
            # a shortest-edge resize; the mangled original's argument was lost.
            size = get_resize_output_image_size(image, size=size['''shortest_edge'''], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size['''height'''], size['''width'''])
        else:
            raise ValueError(F"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Center-crop ``image`` to the (height, width) given by ``size``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size['''height'''], size['''width''']), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply ``image`` by ``scale`` (e.g. 1/255 to map uint8 into [0, 1])."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Normalize ``image`` with per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Apply the configured pipeline to one image or a batch; per-call args override defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='''crop_size''', default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        if not is_batched(images):
            images = [images]
        if not valid_images(images):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''')
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''')
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''')
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 104
|
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
class UpperCAmelCase_(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for BARTpho.

    Reconstructed from a name-mangled original: the mixin base was the undefined
    name ``a`` (restored from this file's ``TokenizerTesterMixin`` import), the
    class attributes and methods all shared mangled names (breaking unittest
    discovery and the mixin's ``get_tokenizer`` hook), and ``setUp`` referenced
    the undefined ``__a`` / never assigned ``self.monolingual_vocab_file``.
    ``_snake_case`` is this module's sample SentencePiece model path.
    """

    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        tokenizer = BartphoTokenizer(_snake_case, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        """Hook used by TokenizerTesterMixin to build tokenizers under test."""
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """Return an (input, expected round-trip) text pair for the common tests."""
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(_snake_case, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 36
| 0
|
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the launcher's own flags plus the wrapped training script and its args.

    The name matches the ``parse_args()`` call in ``main`` below; the mangled
    original passed the undefined name ``_lowercase`` as ``type=``/``nargs=``,
    restored here to ``int``/``str``/``REMAINDER``.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores" , type=int , default=1 , help="Number of TPU cores to use (1 or 8)." )
    # positional
    parser.add_argument(
        "training_script" , type=str , help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ) , )
    # rest from the training program
    parser.add_argument("training_script_args" , nargs=REMAINDER )
    return parser.parse_args()
def main():
    """Import the training script and spawn its ``_mp_fn`` across the requested TPU cores."""
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv so the wrapped script sees its own args plus the core count
    # (the mangled original assigned the new argv to a throwaway local instead).
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
| 105
|
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
_snake_case = logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple):
    """Compute a (height, width) resize target, optionally keeping aspect ratio.

    Both returned dimensions are rounded to a multiple of ``multiple``. The name
    and keyword parameters match the ``get_resize_output_image_size(...,
    output_size=..., keep_aspect_ratio=..., multiple=...)`` call in the image
    processor below; the mangled original declared duplicate parameter names in
    both the outer and the nested def (SyntaxErrors).
    """

    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        # Round to the nearest multiple, then push back inside [min_val, max_val].
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)
    return (new_height, new_width)
class UpperCAmelCase_(BaseImageProcessor):
    """DPT-style image processor: aspect-aware resize -> rescale -> normalize.

    Reconstructed from a name-mangled original whose base class was the
    undefined name ``a`` (restored from this file's ``BaseImageProcessor``
    import) and whose signatures all declared duplicate ``__a`` parameters (a
    SyntaxError). Method names are grounded by the ``self.resize`` /
    ``self.rescale`` / ``self.normalize`` calls in ``preprocess``.
    """

    model_input_names = ['pixel_values']

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Resize ``image``, optionally keeping aspect ratio and snapping dims to a multiple."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image, output_size=(size["height"], size["width"]), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of, )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply ``image`` by ``scale`` (e.g. 1/255 to map uint8 into [0, 1])."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Normalize ``image`` with per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: int = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Apply the configured pipeline to one image or a batch; per-call args override defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        # NOTE(review): precedence here is ``(do_resize and size is None) or
        # resample is None`` — kept byte-identical to the original condition.
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        """Convert model logits into per-image semantic segmentation maps.

        If ``target_sizes`` is given, each logit map is bilinearly resized to its
        target before the per-pixel argmax.
        """
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                # NOTE(review): align_corners was a mangled argument in the
                # original; False is the conventional choice — confirm.
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
| 36
| 0
|
"""simple docstring"""
import sys
import turtle
def get_mid(pa, pb):
    """Return the midpoint of two 2-D points.

    The name matches the ``get_mid(...)`` calls in ``triangle`` below; the
    mangled original declared duplicate parameter names (a SyntaxError) and
    averaged the first point with itself instead of the two distinct points.
    """
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2
def triangle(vertex1, vertex2, vertex3, depth):
    """Draw a Sierpinski triangle of the given recursion ``depth`` with the global pen.

    Reconstructed from a mangled original in which all three vertices collapsed
    into one parameter name (a SyntaxError); the recursive midpoint subdivision
    follows the classic Sierpinski construction.
    """
    # Outline the current triangle.
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])
    if depth == 0:
        return
    # Recurse into the three corner sub-triangles formed by the edge midpoints.
    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            '''Correct format for using this script: '''
            '''python fractals.py <int:depth_for_fractal>'''
        )
    # The mangled original assigned the turtle to ``__UpperCamelCase`` but used
    # ``my_pen``; same for the ``vertices`` list below.
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor('''red''')
    vertices = [(-1_7_5, -1_2_5), (0, 1_7_5), (1_7_5, -1_2_5)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 106
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def get_config(model_name):
    """Build a `BitConfig` for *model_name*, wired to the ImageNet-1k label maps.

    Renamed from the obfuscated `A` to match the call site in
    `convert_bit_checkpoint`. Also fixes `int(filename)` (a guaranteed crash)
    to `int(k)` when casting the id2label keys.
    """
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1_000,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def rename_key(name):
    """Translate one timm BiT state-dict key into the HF `BitForImageClassification` layout.

    Renamed from the obfuscated `A`; the conversion loop in
    `convert_bit_checkpoint` applies this function per key.
    """
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    # everything that is neither the classifier head nor already prefixed
    # lives under the encoder
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
def prepare_img():
    """Download the standard COCO cats test image used for conversion checks."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Convert a timm BiT checkpoint into HF format, verify outputs, and save/push.

    NOTE(review): the obfuscated original never called `rename_key` and popped
    keys with an undefined variable — the rename loop is restored below.
    """
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model and remap keys to the HF layout
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        # timm classifier heads carry trailing singleton dims
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor mirroring timm's eval transform
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
    # CLI entry point; the obfuscated original bound the parser and parsed args
    # to a throwaway name while the code below reads `parser` / `args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="resnetv2_50x1_bitm",
        type=str,
        help="Name of the BiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model to the hub.",
    )

    args = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 36
| 0
|
from sklearn.metrics import recall_score
import datasets
__lowerCAmelCase : List[str] = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
__lowerCAmelCase : Optional[Any] = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. 
Defaults to .\n - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {\'recall\': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {\'recall\': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {\'recall\': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, 
references=references, average=\'micro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'recall\': array([1., 0., 0.])}\n'
__lowerCAmelCase : Optional[int] = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__ (datasets.Metric ):
    """Recall metric backed by scikit-learn's ``recall_score``.

    NOTE(review): the obfuscated original gave both methods one name (dropping
    the info hook) and duplicated every `_compute` parameter (a SyntaxError);
    the standard `datasets.Metric` hooks `_info`/`_compute` are restored.
    """

    def _info(self):
        # Multilabel configs take a sequence of ints per example; every other
        # config takes a single int per example.
        if self.config_name == "multilabel":
            features = datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
            )
        else:
            features = datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=features,
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )

    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        # sklearn's signature is (y_true, y_pred), so references come first.
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
| 107
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class UpperCAmelCase_ (BackboneConfigMixin, PretrainedConfig):
    """Swin Transformer configuration (mirrors `transformers.SwinConfig`).

    NOTE(review): the obfuscated original declared `class X(a, a)` (duplicate
    undefined base → TypeError) and duplicated every `__init__` parameter (a
    SyntaxError). Bases are restored from this module's imports, parameter
    names from the attribute assignments in the body.
    """

    # hooks read by `PretrainedConfig` (the original bound both to one name,
    # silently losing `model_type`)
    model_type = 'swin'
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
class UpperCAmelCase_ (OnnxConfig):
    """ONNX export configuration for Swin.

    NOTE(review): the obfuscated base `a` was undefined and both properties
    shared one name; the `OnnxConfig` hook names are restored.
    """

    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self):
        # NCHW pixel input with every axis dynamic
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self):
        # tolerance used when validating exported ONNX outputs against PyTorch
        return 1E-4
| 36
| 0
|
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
# Modalities a tool may declare; the tester mixin below reads `authorized_types`,
# which the obfuscated original bound to a throwaway name.
authorized_types = ["text", "image", "audio"]
def create_inputs(input_types: List[str]):
    """Build one dummy input per requested modality; nested lists recurse.

    Renamed from the obfuscated `a__` — the recursion and the mixin below call
    `create_inputs`. Also fixes `isinstance(x, x)` to `isinstance(input_type, list)`.
    """
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512)))
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            # a nested list of modalities yields a nested list of inputs
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"""Invalid type requested: {input_type}""")
    return inputs
def output_types(outputs: List):
    """Map concrete tool outputs back to their modality names.

    Renamed from the obfuscated `a__` — the mixin below calls `output_types`.
    """
    detected = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            detected.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            detected.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            detected.append("audio")
        else:
            raise ValueError(f"""Invalid output: {output}""")
    return detected
@is_tool_test
class SCREAMING_SNAKE_CASE__ :
    """Sanity-check mixin for agent tools; the host test class must set ``self.tool``.

    NOTE(review): the obfuscated original named all five methods identically
    (so four never ran) and referenced an undefined `snake_case__` placeholder
    throughout; unique test_* names and real locals are restored.
    """

    def test_inputs_outputs(self):
        # Every declared input/output modality must be one of `authorized_types`.
        self.assertTrue(hasattr(self.tool , "inputs" ) )
        self.assertTrue(hasattr(self.tool , "outputs" ) )

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types )
            else:
                self.assertTrue(_input in authorized_types )

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types )

    def test_call(self):
        # Calling the tool with generated inputs must yield the declared output types.
        inputs = create_inputs(self.tool.inputs )
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs ) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs) , self.tool.outputs )

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool , "description" ) )
        self.assertTrue(hasattr(self.tool , "default_checkpoint" ) )
        self.assertTrue(self.tool.description.startswith("This is a tool that" ) )

    def test_agent_types_outputs(self):
        # Outputs must be instances of the mapped agent types.
        inputs = create_inputs(self.tool.inputs )
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs) , len(self.tool.outputs ) )

        for output, output_type in zip(outputs, self.tool.outputs ):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type) )

    def test_agent_types_inputs(self):
        # The tool must also accept inputs wrapped in their agent types.
        inputs = create_inputs(self.tool.inputs )

        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs ):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs) , len(self.tool.outputs ) )
| 108
|
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


# Versatile Diffusion needs torch and transformers >= 4.25.0; when either is
# missing, export dummy objects that raise a helpful error on first use.
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
    )
else:
    # real implementations (same public names as the dummies above)
    from .modeling_text_unet import UNetFlatConditionModel
    from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
    from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
    from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 36
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
# Module logger; `__call__` below warns through `logger`, which the obfuscated
# original bound to a throwaway name.
logger = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ (SequenceFeatureExtractor):
    """Speech2Text feature extractor: Kaldi-style log-mel filter banks plus
    utterance-level cepstral mean/variance normalization (CMVN).

    NOTE(review): the obfuscated original used an undefined base class,
    duplicated parameters in three signatures (SyntaxErrors), and gave all
    four methods one name; the names the body itself references
    (`self._extract_fbank_features`, `self.utterance_cmvn`, `self.normalize`)
    are restored.
    """

    model_input_names = ['input_features', 'attention_mask']

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform):
        """Compute Kaldi-compatible log-mel filter-bank features for one waveform."""
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(x, input_length, normalize_means=True, normalize_vars=True, padding_value=0.0):
        """Mean/variance-normalize the first `input_length` frames of `x`."""
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            # restore the padding value on frames beyond the valid length
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features, attention_mask=None):
        """Apply utterance-level CMVN to each feature matrix in the batch."""
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_tensors=None,
        sampling_rate=None,
        return_attention_mask=None,
        **kwargs,
    ):
        """Featurize and pad one or several sequence(s) of raw mono speech."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                """It is strongly recommended to pass the `sampling_rate` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""")

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask)

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
| 109
|
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
# Comparison operators used to evaluate pip-style version specifiers; the
# functions below read this dict as `ops`, which the obfuscated original
# bound to a throwaway name.
ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    """Compare installed vs wanted version with the pip operator `op`.

    Renamed from the obfuscated `A` to match the call sites in
    `require_version`; parameter order follows those calls.
    """
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}." )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}" )
def require_version(requirement, hint=None):
    """Check that an installed package satisfies a pip-style `requirement`.

    Accepts bare names ("numpy") or specifiers ("numpy>=1.20,<2"); `hint` is
    appended to any failure message. Renamed from the obfuscated `A` to match
    `require_version_core`'s call.
    """
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}")
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}")
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys() )}, but got {op}")

        # special case: the python interpreter itself has no distribution metadata
        if pkg == "python":
            got_ver = ".".join([str(x) for x in sys.version_info[:3]])
            for op, want_ver in wanted.items():
                _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
            return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}")

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def require_version_core(requirement):
    """`require_version` with a transformers-dev oriented hint on failure.

    NOTE(review): original name lost to obfuscation (`A`); restored from the
    upstream convention — no caller is visible in this chunk to confirm.
    """
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
| 36
| 0
|
def sum_of_digits(n: int) -> int:
    """Return the sum of the decimal digits of `n` (sign ignored), iteratively.

    Renamed from the obfuscated `_a` — `benchmark` references `sum_of_digits`.
    """
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res
def sum_of_digits_recursion(n: int) -> int:
    """Recursive variant of `sum_of_digits` (sign ignored).

    The obfuscated original recursed into a different (shadowed) function;
    restored to proper self-recursion.
    """
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)
def sum_of_digits_compact(n: int) -> int:
    """One-liner variant: sum the characters of `str(abs(n))` as ints.

    Fixes the obfuscated `int(<whole-number>)` back to `int(c)` per digit.
    """
    return sum(int(c) for c in str(abs(n)))
def benchmark() -> None:
    """Time each digit-sum implementation on three increasingly large ints.

    Renamed from the obfuscated `_a` — the `__main__` guard calls `benchmark()`.
    The inner helper had duplicate parameter names (a SyntaxError); restored
    to `(func, value)` per the call below.
    """
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        # timeit resolves the call through __main__, so the digit-sum
        # functions must exist at module level under their real names
        call = f'{func.__name__}({value})'
        timing = timeit(f'__main__.{call}', setup='''import __main__''')
        print(f'{call:56} = {func(value)} -- {timing:.4f} seconds')

    for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
    # run the docstring examples first, then time each implementation
    import doctest
    doctest.testmod()
    benchmark()
| 110
|
import argparse
from collections import defaultdict
import yaml
_snake_case = "docs/source/en/_toctree.yml"
def clean_doc_toc(doc_list):
    """Clean one TOC section: dedupe entries, sort by title, keep 'Overview' first.

    Renamed from the obfuscated `A` — `check_scheduler_doc`/`check_pipeline_doc`
    call `clean_doc_toc`. Also makes the two-overviews error an f-string.
    """
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]} )
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key} )
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others." )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]} )

    # Add none duplicate-keys (condition kept verbatim from the original)
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1] )
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower() )

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed." )
    overview_doc.extend(new_doc)

    # Sort
    return overview_doc
def check_scheduler_doc(overwrite=False):
    """Check (and with `overwrite=True`, fix) the Schedulers section of the TOC.

    Renamed from the obfuscated `A`; the cleaned section is written back into
    the parsed structure, which the original dropped into throwaway names.
    `_snake_case` is the module-level TOC path constant.
    """
    with open(_snake_case, encoding="utf-8" ) as f:
        content = yaml.safe_load(f.read() )

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(_snake_case, "w", encoding="utf-8" ) as f:
                f.write(yaml.dump(content, allow_unicode=True) )
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
def A ( _lowerCamelCase=False ):
    """Check (and optionally rewrite) the "Pipelines" section of the docs table of contents.

    NOTE(review): same renaming damage as the scheduler check above — the path,
    the parsed ``content``, the index counters and the write-back targets were
    all collapsed onto throwaway names. Restore the original names before use.
    """
    with open(_lowerCamelCase , encoding="utf-8" ) as f:
        _lowerCAmelCase : Tuple = yaml.safe_load(f.read() )
    # Get to the API doc
    _lowerCAmelCase : Optional[int] = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    _lowerCAmelCase : int = content[api_idx]["sections"]
    # Then to the model doc
    _lowerCAmelCase : List[str] = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1
    _lowerCAmelCase : Dict = False
    _lowerCAmelCase : Optional[int] = api_doc[pipeline_idx]["sections"]
    _lowerCAmelCase : Tuple = []
    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            _lowerCAmelCase : List[Any] = pipeline_doc["section"]
            _lowerCAmelCase : Union[str, Any] = clean_doc_toc(_lowerCamelCase )
            if overwrite:
                _lowerCAmelCase : Optional[Any] = new_sub_pipeline_doc
        new_pipeline_docs.append(_lowerCamelCase )
    # sort overall pipeline doc
    _lowerCAmelCase : Union[str, Any] = clean_doc_toc(_lowerCamelCase )
    if new_pipeline_docs != pipeline_docs:
        _lowerCAmelCase : Dict = True
        if overwrite:
            _lowerCAmelCase : Optional[int] = new_pipeline_docs
    if diff:
        if overwrite:
            # Write the fixed TOC back to disk.
            _lowerCAmelCase : Optional[int] = api_doc
            with open(_lowerCamelCase , "w" , encoding="utf-8" ) as f:
                f.write(yaml.dump(_lowerCamelCase , allow_unicode=_lowerCamelCase ) )
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
if __name__ == "__main__":
    # CLI entry point: validate the schedulers and pipelines TOC sections,
    # rewriting the TOC file in place when --fix_and_overwrite is passed.
    # NOTE(review): both the parser and the parsed args are bound to
    # ``_snake_case`` (renaming artifact) yet read back as ``parser`` / ``args``
    # below — restore the original names before running.
    _snake_case = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    _snake_case = parser.parse_args()
    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
| 36
| 0
|
from __future__ import annotations
import requests
# Set of field names Reddit's post-listing JSON API can return; used below to
# validate the caller-supplied ``wanted_data`` field subset.
lowercase : Optional[int] = set(
    """approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports""".split()
)
def _snake_case( subreddit , limit = 1 , age = "new" , wanted_data = None ):
    """Fetch posts from ``r/<subreddit>`` via Reddit's public JSON listing.

    Bug fixes: the original signature declared one garbled name for all four
    parameters (a SyntaxError in Python) and the body referenced undefined
    names; parameter names are restored from the body's own usage and the
    call site at the bottom of the file.

    :param subreddit: subreddit name without the ``r/`` prefix
    :param limit: number of posts to fetch
    :param age: which listing to query ("new", "top", "hot", ...)
    :param wanted_data: optional subset of post fields to keep, validated
        against the module-level field-name set
    :return: mapping of post index -> raw child (or field subset) dicts
    :raises ValueError: if ``wanted_data`` contains unknown field names
    :raises requests.HTTPError: when rate-limited (HTTP 429)
    """
    wanted = wanted_data or []
    # ``lowercase`` is the module-level set of valid Reddit field names above.
    if invalid_search_terms := ", ".join(sorted(set(wanted ) - lowercase ) ):
        raise ValueError(f"Invalid search term: {invalid_search_terms}" )
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}" , headers={"""User-agent""": """A random string"""} , )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted:
        # No field filter: return the raw child objects by index.
        return {id_: data["data"]["children"][id_] for id_ in range(limit )}
    data_dict = {}
    for id_ in range(limit ):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted
        }
    return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited.Try after some time
    # NOTE(review): ``get_subreddit_data`` is not defined in this module — the
    # fetch function above was renamed ``_snake_case``; presumably they are the
    # same function. Restore the name before running.
    print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
| 20
|
def A ( density , bulk_modulus ):
    """Return the speed of sound (m/s) in a fluid: v = sqrt(K / rho).

    Bug fix: both parameters shared one garbled name, which is a SyntaxError;
    names are restored from the body's validation messages.

    :param density: fluid density rho in kg/m^3 (must be > 0)
    :param bulk_modulus: bulk modulus K in Pa (must be > 0)
    :raises ValueError: if either quantity is non-positive
    """
    if density <= 0:
        raise ValueError("Impossible fluid density" )
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus" )
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
    # Run the doctests embedded in this module.
    import doctest
    doctest.testmod()
| 36
| 0
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
# Module-level flag; it is never read in the visible portion of this file —
# NOTE(review): confirm its purpose against the original source before removal.
a__ : Any = False
class UpperCamelCase_ ( unittest.TestCase):
    """Fast CPU smoke tests for ``VQDiffusionPipeline`` built from tiny dummy submodels.

    NOTE(review): automated renaming gave every method below the same name
    (``UpperCAmelCase_``) — only the last definition survives on the class —
    and locals are bound to ``__SCREAMING_SNAKE_CASE`` yet read back under
    their original names (``model``, ``tokenizer``, ``prompt``, ``image`` ...),
    while ``self.num_embed`` / ``self.dummy_vqvae`` / etc. reference property
    names that no longer exist. Restore the original names before running.
    """
    def UpperCAmelCase_ ( self : Tuple ) -> Optional[int]:
        # Release references and cached GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def UpperCAmelCase_ ( self : Optional[int] ) -> List[Any]:
        # Codebook size used by the dummy VQ model and scheduler.
        return 1_2
    @property
    def UpperCAmelCase_ ( self : List[str] ) -> List[str]:
        # Number of AdaNorm embeddings for the dummy transformer.
        return 1_2
    @property
    def UpperCAmelCase_ ( self : Dict ) -> int:
        # Hidden size of the dummy text encoder.
        return 3_2
    @property
    def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]:
        # Tiny VQ-VAE with a 3-dim latent space; seeded for determinism.
        torch.manual_seed(0 )
        __SCREAMING_SNAKE_CASE = VQModel(
            block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
        return model
    @property
    def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
        # Tiny CLIP tokenizer fixture from the hub.
        __SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        return tokenizer
    @property
    def UpperCAmelCase_ ( self : int ) -> Any:
        # Tiny randomly-initialised CLIP text encoder; seeded for determinism.
        torch.manual_seed(0 )
        __SCREAMING_SNAKE_CASE = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
        return CLIPTextModel(__a )
    @property
    def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
        # Tiny 12x12 discrete-token transformer matching the VQ codebook.
        torch.manual_seed(0 )
        __SCREAMING_SNAKE_CASE = 1_2
        __SCREAMING_SNAKE_CASE = 1_2
        __SCREAMING_SNAKE_CASE = {
            "attention_bias": True,
            "cross_attention_dim": 3_2,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 3_2,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }
        __SCREAMING_SNAKE_CASE = TransformeraDModel(**__a )
        return model
    def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
        # End-to-end run WITHOUT learned classifier-free-sampling embeddings:
        # checks output shape, a reference slice, and tuple-return equivalence.
        __SCREAMING_SNAKE_CASE = "cpu"
        __SCREAMING_SNAKE_CASE = self.dummy_vqvae
        __SCREAMING_SNAKE_CASE = self.dummy_text_encoder
        __SCREAMING_SNAKE_CASE = self.dummy_tokenizer
        __SCREAMING_SNAKE_CASE = self.dummy_transformer
        __SCREAMING_SNAKE_CASE = VQDiffusionScheduler(self.num_embed )
        __SCREAMING_SNAKE_CASE = LearnedClassifierFreeSamplingEmbeddings(learnable=__a )
        __SCREAMING_SNAKE_CASE = VQDiffusionPipeline(
            vqvae=__a , text_encoder=__a , tokenizer=__a , transformer=__a , scheduler=__a , learned_classifier_free_sampling_embeddings=__a , )
        __SCREAMING_SNAKE_CASE = pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )
        __SCREAMING_SNAKE_CASE = "teddy bear playing in the pool"
        __SCREAMING_SNAKE_CASE = torch.Generator(device=__a ).manual_seed(0 )
        __SCREAMING_SNAKE_CASE = pipe([prompt] , generator=__a , num_inference_steps=2 , output_type="np" )
        __SCREAMING_SNAKE_CASE = output.images
        __SCREAMING_SNAKE_CASE = torch.Generator(device=__a ).manual_seed(0 )
        __SCREAMING_SNAKE_CASE = pipe(
            [prompt] , generator=__a , output_type="np" , return_dict=__a , num_inference_steps=2 )[0]
        __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
        __SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 2_4, 2_4, 3)
        __SCREAMING_SNAKE_CASE = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    def UpperCAmelCase_ ( self : Optional[int] ) -> int:
        # Same end-to-end run WITH learned classifier-free-sampling embeddings.
        __SCREAMING_SNAKE_CASE = "cpu"
        __SCREAMING_SNAKE_CASE = self.dummy_vqvae
        __SCREAMING_SNAKE_CASE = self.dummy_text_encoder
        __SCREAMING_SNAKE_CASE = self.dummy_tokenizer
        __SCREAMING_SNAKE_CASE = self.dummy_transformer
        __SCREAMING_SNAKE_CASE = VQDiffusionScheduler(self.num_embed )
        __SCREAMING_SNAKE_CASE = LearnedClassifierFreeSamplingEmbeddings(
            learnable=__a , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
        __SCREAMING_SNAKE_CASE = VQDiffusionPipeline(
            vqvae=__a , text_encoder=__a , tokenizer=__a , transformer=__a , scheduler=__a , learned_classifier_free_sampling_embeddings=__a , )
        __SCREAMING_SNAKE_CASE = pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )
        __SCREAMING_SNAKE_CASE = "teddy bear playing in the pool"
        __SCREAMING_SNAKE_CASE = torch.Generator(device=__a ).manual_seed(0 )
        __SCREAMING_SNAKE_CASE = pipe([prompt] , generator=__a , num_inference_steps=2 , output_type="np" )
        __SCREAMING_SNAKE_CASE = output.images
        __SCREAMING_SNAKE_CASE = torch.Generator(device=__a ).manual_seed(0 )
        __SCREAMING_SNAKE_CASE = pipe(
            [prompt] , generator=__a , output_type="np" , return_dict=__a , num_inference_steps=2 )[0]
        __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
        __SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 2_4, 2_4, 3)
        __SCREAMING_SNAKE_CASE = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class UpperCamelCase_ ( unittest.TestCase):
    """Slow GPU integration test comparing VQDiffusion output to a stored reference image.

    NOTE(review): both methods share the name ``UpperCAmelCase_`` (renaming
    artifact), and locals are bound to ``__SCREAMING_SNAKE_CASE`` but read back
    as ``pipeline`` / ``output`` / ``image`` / ``expected_image``; ``__a`` is
    undefined. Restore the original names before running.
    """
    def UpperCAmelCase_ ( self : str ) -> Dict:
        # Free references and GPU memory after each test.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def UpperCAmelCase_ ( self : int ) -> List[str]:
        # Load the reference output and compare against a freshly generated image.
        __SCREAMING_SNAKE_CASE = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy" )
        __SCREAMING_SNAKE_CASE = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq" )
        __SCREAMING_SNAKE_CASE = pipeline.to(__a )
        pipeline.set_progress_bar_config(disable=__a )
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        __SCREAMING_SNAKE_CASE = torch.Generator(device=__a ).manual_seed(0 )
        __SCREAMING_SNAKE_CASE = pipeline(
            "teddy bear playing in the pool" , num_images_per_prompt=1 , generator=__a , output_type="np" , )
        __SCREAMING_SNAKE_CASE = output.images[0]
        assert image.shape == (2_5_6, 2_5_6, 3)
        assert np.abs(expected_image - image ).max() < 2.0
| 54
|
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
_snake_case = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class UpperCAmelCase_ ( a):
    """Torch ``Dataset`` stub of a given length whose i-th sample is simply ``i``.

    Bug fixes: the constructor bound the length to a throwaway local, so
    ``self.length`` (read by ``__len__``) was never set, and ``__getitem__``
    returned an undefined name ``i`` instead of its index parameter.
    """
    def __init__( self, __a = 101):
        '''Store the number of samples to expose.'''
        self.length = __a
    def __len__( self):
        '''Return the configured number of samples.'''
        return self.length
    def __getitem__( self, __a):
        '''Return the index itself as the sample.'''
        return __a
class UpperCAmelCase_ :
    """Collator that tensorizes a batch of integer indices, using the batch
    itself both as model inputs and as labels."""
    def __call__( self, __a):
        '''Return ``{"input_ids": tensor, "labels": tensor}`` built from the batch.'''
        features = torch.tensor(__a)
        targets = torch.tensor(__a)
        return {"input_ids": features, "labels": targets}
class UpperCAmelCase_ ( nn.Module):
    """Trivial model for distributed Trainer tests: it echoes its inputs.

    With ``labels`` it returns ``(loss=0.0, input_ids)``; without, just
    ``input_ids``.
    """
    def __init__( self):
        '''Create the model with one dummy parameter tensor.'''
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        # Bug fix: the layer was bound to a throwaway local and therefore
        # never registered as a submodule of this Module.
        self.fc = nn.Linear(120, 80)
    def snake_case__ ( self, input_ids, labels=None):
        '''Forward pass. Bug fix: the two parameters shared one garbled name
        (a SyntaxError); names restored to match the collator's batch keys.'''
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class UpperCAmelCase_ ( a):
    """Launches this same file under ``torchrun`` on 2 Neuron cores; any
    non-zero exit of the subprocess fails the test."""
    @require_torch_neuroncore
    def snake_case__ ( self):
        '''Build the torchrun command line and run it asynchronously.'''
        _lowerCAmelCase : int = f"--nproc_per_node=2\n        --master_port={get_torch_dist_unique_port()}\n        {self.test_file_dir}/test_trainer_distributed.py\n        ".split()
        _lowerCAmelCase : Tuple = self.get_auto_remove_tmp_dir()
        _lowerCAmelCase : Optional[int] = f"--output_dir {output_dir}".split()
        # NOTE(review): locals above are bound to throwaway names but read back
        # as ``distributed_args`` / ``output_dir`` / ``args`` — restore names.
        _lowerCAmelCase : List[Any] = ["torchrun"] + distributed_args + args
        execute_subprocess_async(__a, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
class UpperCAmelCase_ ( a):
    """Launches this same file under ``torchrun`` across all visible GPUs; any
    non-zero exit of the subprocess fails the test."""
    @require_torch_multi_gpu
    def snake_case__ ( self):
        '''Build the torchrun command line (one process per GPU) and run it.'''
        _lowerCAmelCase : Dict = f"--nproc_per_node={torch.cuda.device_count()}\n        --master_port={get_torch_dist_unique_port()}\n        {self.test_file_dir}/test_trainer_distributed.py\n        ".split()
        _lowerCAmelCase : Any = self.get_auto_remove_tmp_dir()
        _lowerCAmelCase : Optional[int] = f"--output_dir {output_dir}".split()
        # NOTE(review): locals above are bound to throwaway names but read back
        # as ``distributed_args`` / ``output_dir`` / ``args`` — restore names.
        _lowerCAmelCase : Any = ["torchrun"] + distributed_args + args
        execute_subprocess_async(__a, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
    # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
    #
    # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    #
    # NOTE(review): throughout this block values are bound to ``_snake_case``
    # (renaming artifact) but read back under their original names —
    # ``parser`` / ``training_args`` / ``dataset`` / ``trainer`` / ``metrics`` /
    # ``p`` — and ``def A ( _lowerCamelCase )`` is the original
    # ``compute_metrics(p)`` callback. Restore the names before running.
    _snake_case = HfArgumentParser((TrainingArguments,))
    _snake_case = parser.parse_args_into_dataclasses()[0]
    logger.warning(
        f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
        f'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'''
    )
    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        _snake_case = DummyDataset(dataset_length)

        def A ( _lowerCamelCase ):
            '''Check that predictions and labels come back complete and in order.'''
            _lowerCAmelCase : Dict = list(range(len(_lowerCamelCase ) ) )
            _lowerCAmelCase : Union[str, Any] = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    F"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}" )
            return {"success": success}

        _snake_case = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        # First pass: evaluation and prediction with default settings.
        _snake_case = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)
        _snake_case = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)
        # Second pass: repeat with eval accumulation enabled (value 2), then
        # reset it to None at the end of the loop iteration.
        _snake_case = 2
        _snake_case = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)
        _snake_case = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)
        _snake_case = None
| 36
| 0
|
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    """The RoBERTa Model transformer with early exiting (DeeRoBERTa). """ , A__ , )
class __lowerCamelCase ( A__ ):
    """DeeRoBERTa: the DeeBERT early-exit model specialised for RoBERTa by
    swapping in RoBERTa embeddings.

    NOTE(review): both class attributes below were renamed to ``a_`` so the
    second shadows the first; ``super().__init__(__a )`` references an
    undefined ``__a`` (the parameter is ``a_``); and the embeddings module is
    bound to a throwaway local instead of ``self.embeddings``. Restore the
    original names before use.
    """
    a_ : List[Any] = RobertaConfig
    a_ : Optional[Any] = """roberta"""
    def __init__( self : Optional[Any] , a_ : Optional[Any] ):
        super().__init__(__a )
        lowerCAmelCase_ : Optional[Any] = RobertaEmbeddings(__a )
        self.init_weights()
@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n    also takes care of multi-layer training. """ , A__ , )
class __lowerCamelCase ( A__ ):
    """DeeRoBERTa sequence classifier with highway (early-exit) heads.

    NOTE(review): both class attributes were renamed to ``a_`` (the second
    shadows the first); ``__init__`` binds its five submodules/values to a
    throwaway local instead of ``self.num_labels`` / ``self.num_layers`` /
    ``self.roberta`` / ``self.dropout`` / ``self.classifier``, which the
    forward pass reads; and the forward decorator references an undefined
    ``__a``. Restore the original names before use.
    """
    a_ : str = RobertaConfig
    a_ : int = """roberta"""
    def __init__( self : str , a_ : Union[str, Any] ):
        super().__init__(__a )
        lowerCAmelCase_ : Optional[int] = config.num_labels
        lowerCAmelCase_ : Optional[int] = config.num_hidden_layers
        lowerCAmelCase_ : Optional[int] = DeeRobertaModel(__a )
        lowerCAmelCase_ : Union[str, Any] = nn.Dropout(config.hidden_dropout_prob )
        lowerCAmelCase_ : List[str] = nn.Linear(config.hidden_size , self.config.num_labels )
    @add_start_docstrings_to_model_forward(__a )
    def lowerCamelCase ( self : int , a_ : Optional[Any]=None , a_ : List[Any]=None , a_ : Optional[Any]=None , a_ : Any=None , a_ : Dict=None , a_ : int=None , a_ : Tuple=None , a_ : Tuple=-1 , a_ : Any=False , ):
        # Run the full model; a HighwayException signals that an early-exit
        # head fired before the final layer.
        lowerCAmelCase_ : Union[str, Any] = self.num_layers
        try:
            lowerCAmelCase_ : List[Any] = self.roberta(
                __a , attention_mask=__a , token_type_ids=__a , position_ids=__a , head_mask=__a , inputs_embeds=__a , )
            lowerCAmelCase_ : List[Any] = outputs[1]
            lowerCAmelCase_ : Dict = self.dropout(__a )
            lowerCAmelCase_ : Dict = self.classifier(__a )
            lowerCAmelCase_ : Optional[Any] = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # Recover the logits and the layer at which the model exited early.
            lowerCAmelCase_ : Tuple = e.message
            lowerCAmelCase_ : Union[str, Any] = e.exit_layer
            lowerCAmelCase_ : List[Any] = outputs[0]
        if not self.training:
            # Track prediction entropy for exit-threshold analysis at inference.
            lowerCAmelCase_ : int = entropy(__a )
            lowerCAmelCase_ : List[Any] = []
            lowerCAmelCase_ : str = []
        if labels is not None:
            # Loss on the final logits: MSE for regression (1 label), else CE.
            if self.num_labels == 1:
                #  We are doing regression
                lowerCAmelCase_ : Optional[Any] = MSELoss()
                lowerCAmelCase_ : int = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
            else:
                lowerCAmelCase_ : Optional[Any] = CrossEntropyLoss()
                lowerCAmelCase_ : Optional[Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            # work with highway exits
            lowerCAmelCase_ : Optional[int] = []
            for highway_exit in outputs[-1]:
                lowerCAmelCase_ : Any = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(__a )
                    highway_entropy.append(highway_exit[2] )
                # Per-exit loss mirrors the final-layer loss choice.
                if self.num_labels == 1:
                    #  We are doing regression
                    lowerCAmelCase_ : List[str] = MSELoss()
                    lowerCAmelCase_ : List[Any] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
                else:
                    lowerCAmelCase_ : Dict = CrossEntropyLoss()
                    lowerCAmelCase_ : Optional[Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
                highway_losses.append(__a )
            if train_highway:
                # Train on the sum of the intermediate highway losses.
                lowerCAmelCase_ : int = (sum(highway_losses[:-1] ),) + outputs
                # exclude the final highway, of course
            else:
                lowerCAmelCase_ : Any = (loss,) + outputs
        if not self.training:
            lowerCAmelCase_ : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                lowerCAmelCase_ : Optional[Any] = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
| 241
|
from __future__ import annotations
import bisect
def A ( sorted_collection , item , lo = 0 , hi = -1 ):
    """Return the leftmost index at which ``item`` can be inserted into
    ``sorted_collection`` while keeping it sorted.

    Bug fix: all four parameters shared one garbled name (a SyntaxError).
    A negative ``hi`` means "search up to the end of the list".
    """
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def A ( sorted_collection , item , lo = 0 , hi = -1 ):
    """Return the rightmost index at which ``item`` can be inserted into
    ``sorted_collection`` while keeping it sorted.

    Bug fix: all four parameters shared one garbled name (a SyntaxError).
    A negative ``hi`` means "search up to the end of the list".
    """
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def A ( sorted_collection , item , lo = 0 , hi = -1 ):
    """Insert ``item`` into ``sorted_collection`` in place at the leftmost
    position that keeps it sorted.

    Bug fixes: the parameters shared one garbled name (a SyntaxError) and the
    body called an undefined ``bisect_left``; it now uses the ``bisect``
    module imported at the top of the file, normalising a negative ``hi``
    (meaning "to the end") first since ``bisect`` does not accept negatives
    with that meaning.
    """
    if hi < 0:
        hi = len(sorted_collection )
    sorted_collection.insert(bisect.bisect_left(sorted_collection , item , lo , hi ) , item )
def A ( sorted_collection , item , lo = 0 , hi = -1 ):
    """Insert ``item`` into ``sorted_collection`` in place at the rightmost
    position that keeps it sorted.

    Bug fixes: the parameters shared one garbled name (a SyntaxError) and the
    body called an undefined ``bisect_right``; it now uses the ``bisect``
    module imported at the top of the file, normalising a negative ``hi``
    (meaning "to the end") first.
    """
    if hi < 0:
        hi = len(sorted_collection )
    sorted_collection.insert(bisect.bisect_right(sorted_collection , item , lo , hi ) , item )
def A ( sorted_collection , item ):
    """Iterative binary search: return the index of ``item`` in
    ``sorted_collection`` or ``None`` if absent.

    Bug fix: both parameters shared one garbled name (a SyntaxError).
    """
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def A ( sorted_collection , item ):
    """Binary search via the standard-library ``bisect`` module: return the
    index of ``item`` in ``sorted_collection`` or ``None`` if absent.

    Bug fix: both parameters shared one garbled name (a SyntaxError).
    """
    index = bisect.bisect_left(sorted_collection , item )
    if index != len(sorted_collection ) and sorted_collection[index] == item:
        return index
    return None
def A ( sorted_collection , item , left , right ):
    """Recursive binary search over ``sorted_collection[left:right + 1]``:
    return the index of ``item`` or ``None`` if absent.

    Bug fixes: all four parameters shared one garbled name (a SyntaxError)
    and the recursive calls targeted an undefined
    ``binary_search_by_recursion`` — the function now recurses on itself.
    """
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return A(sorted_collection , item , left , midpoint - 1 )
    else:
        return A(sorted_collection , item , midpoint + 1 , right )
if __name__ == "__main__":
    # Interactive demo: read a comma-separated list and a target from stdin,
    # sort the list and search for the target.
    # NOTE(review): values below are bound to ``_snake_case`` but read back as
    # ``user_input`` / ``collection`` / ``target`` / ``result``, and
    # ``binary_search`` is not defined under that name in this module (the
    # helpers above were all renamed ``A``). Restore the names before running.
    _snake_case = input("Enter numbers separated by comma:\n").strip()
    _snake_case = sorted(int(item) for item in user_input.split(","))
    _snake_case = int(input("Enter a single number to be found in the list:\n"))
    _snake_case = binary_search(collection, target)
    if result is None:
        print(f'''{target} was not found in {collection}.''')
    else:
        print(f'''{target} was found at position {result} in {collection}.''')
| 36
| 0
|
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class a__ ( lowerCamelCase_ ):
    """Enumeration of the three decoding strategies the MGP-STR processor supports.

    NOTE(review): the base class was renamed to ``lowerCamelCase_`` —
    presumably the ``ExplicitEnum`` imported above (confirm) — and the three
    members all share the name ``_SCREAMING_SNAKE_CASE``, so only the last
    survives; they are read back below as ``DecodeType.CHARACTER`` /
    ``DecodeType.BPE`` / ``DecodeType.WORDPIECE`` on a class name that does
    not exist here. Restore the original names before use.
    """
    _SCREAMING_SNAKE_CASE : Optional[int] = 'char'  # character-level decoding
    _SCREAMING_SNAKE_CASE : Optional[Any] = 'bpe'   # GPT-2 BPE decoding
    _SCREAMING_SNAKE_CASE : Tuple = 'wp'            # BERT WordPiece decoding
# Tuple of all supported decode types, referenced via the (missing) DecodeType name.
_snake_case = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class a__ ( lowerCamelCase_ ):
    """Processor for MGP-STR: a ViT image processor plus three tokenizers
    (character, GPT-2 BPE, BERT WordPiece) whose decoded candidates are fused
    by confidence score.

    NOTE(review): this class is heavily damaged by automated renaming —
    * the three class attributes all share the name ``_SCREAMING_SNAKE_CASE``;
    * every method is named ``_lowerCamelCase`` (only the last survives);
    * ``__init__``, ``__call__`` and ``_decode_helper`` declare the parameter
      ``_UpperCamelCase`` more than once, which is a SyntaxError;
    * locals are bound to ``_lowercase`` but read back under their original
      names (e.g. ``kwargs``, ``inputs``/``encodings``, ``char_preds``,
      and the ``= [], []`` line meant to bind ``dec_strs, conf_scores``).
    Restore the original names before this file can even import.
    """
    _SCREAMING_SNAKE_CASE : Optional[int] = ['image_processor', 'char_tokenizer']
    _SCREAMING_SNAKE_CASE : Tuple = 'ViTImageProcessor'
    _SCREAMING_SNAKE_CASE : List[Any] = 'MgpstrTokenizer'
    def __init__( self , _UpperCamelCase=None , _UpperCamelCase=None , **_UpperCamelCase ):
        """Build the processor; accepts the deprecated ``feature_extractor`` kwarg
        as an alias for ``image_processor`` and loads the BPE/WordPiece tokenizers."""
        _lowercase : Optional[Any] = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , __a , )
            _lowercase : List[Any] = kwargs.pop("feature_extractor" )
        _lowercase : List[Any] = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        _lowercase : int = tokenizer
        _lowercase : Union[str, Any] = AutoTokenizer.from_pretrained("gpt2" )
        _lowercase : Optional[Any] = AutoTokenizer.from_pretrained("bert-base-uncased" )
        super().__init__(__a , __a )
    def __call__( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , **_UpperCamelCase ):
        """Preprocess images and/or text; when both are given, merge the text
        token ids into the image-processor output under ``labels``."""
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process." )
        if images is not None:
            _lowercase : int = self.image_processor(__a , return_tensors=__a , **__a )
        if text is not None:
            _lowercase : List[Any] = self.char_tokenizer(__a , return_tensors=__a , **__a )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            _lowercase : Tuple = encodings["input_ids"]
            return inputs
    def _lowerCamelCase ( self , _UpperCamelCase ):
        """Decode (char, bpe, wp) logit triples and keep, per sample, the
        candidate string with the highest confidence score."""
        _lowercase : Optional[int] = sequences
        _lowercase : Optional[int] = char_preds.size(0 )
        _lowercase : Optional[Any] = self._decode_helper(__a , "char" )
        _lowercase : Dict = self._decode_helper(__a , "bpe" )
        _lowercase : Tuple = self._decode_helper(__a , "wp" )
        _lowercase : Dict = []
        _lowercase : int = []
        for i in range(__a ):
            # Pick the head (char/bpe/wp) with the highest confidence.
            _lowercase : Dict = [char_scores[i], bpe_scores[i], wp_scores[i]]
            _lowercase : List[str] = [char_strs[i], bpe_strs[i], wp_strs[i]]
            _lowercase : Optional[int] = scores.index(max(__a ) )
            final_strs.append(strs[max_score_index] )
            final_scores.append(scores[max_score_index] )
        _lowercase : Optional[int] = {}
        _lowercase : str = final_strs
        _lowercase : Any = final_scores
        _lowercase : List[Any] = char_strs
        _lowercase : List[str] = bpe_strs
        _lowercase : Dict = wp_strs
        return out
    def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase ):
        """Greedy-decode one head's logits into strings plus cumulative-probability
        confidence scores, truncating each string at its end-of-sequence marker."""
        if format == DecodeType.CHARACTER:
            _lowercase : int = self.char_decode
            _lowercase : Union[str, Any] = 1
            _lowercase : int = "[s]"
        elif format == DecodeType.BPE:
            _lowercase : int = self.bpe_decode
            _lowercase : Tuple = 2
            _lowercase : Dict = "#"
        elif format == DecodeType.WORDPIECE:
            _lowercase : Any = self.wp_decode
            _lowercase : Union[str, Any] = 102
            _lowercase : Optional[Any] = "[SEP]"
        else:
            raise ValueError(f'''Format {format} is not supported.''' )
        _lowercase : Union[str, Any] = [], []
        _lowercase : Dict = pred_logits.size(0 )
        _lowercase : int = pred_logits.size(1 )
        # Greedy argmax over the vocabulary, dropping the BOS position.
        _lowercase : Dict = pred_logits.topk(1 , dim=-1 , largest=__a , sorted=__a )
        _lowercase : Tuple = preds_index.view(-1 , __a )[:, 1:]
        _lowercase : List[Any] = decoder(__a )
        _lowercase : Optional[int] = torch.nn.functional.softmax(__a , dim=2 ).max(dim=2 )
        _lowercase : str = preds_max_prob[:, 1:]
        for index in range(__a ):
            # Truncate at EOS and score by the cumulative product of token probs.
            _lowercase : List[Any] = preds_str[index].find(__a )
            _lowercase : int = preds_str[index][:pred_eos]
            _lowercase : Tuple = preds_index[index].cpu().tolist()
            _lowercase : List[Any] = pred_index.index(__a ) if eos_token in pred_index else -1
            _lowercase : Dict = preds_max_prob[index][: pred_eos_index + 1]
            _lowercase : int = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(__a )
            conf_scores.append(__a )
        return dec_strs, conf_scores
    def _lowerCamelCase ( self , _UpperCamelCase ):
        """Character-head decode: batch-decode ids and strip spaces."""
        _lowercase : List[Any] = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(__a )]
        return decode_strs
    def _lowerCamelCase ( self , _UpperCamelCase ):
        """BPE-head decode via the GPT-2 tokenizer."""
        return self.bpe_tokenizer.batch_decode(__a )
    def _lowerCamelCase ( self , _UpperCamelCase ):
        """WordPiece-head decode: batch-decode ids and strip spaces."""
        _lowercase : Optional[int] = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(__a )]
        return decode_strs
| 250
|
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class UpperCAmelCase_ ( a):
    """Stub interface for audio filters used by the response-plotting helpers.

    NOTE(review): the base class was renamed to ``a`` — presumably the
    ``Protocol`` imported above; confirm before use.
    """
    def snake_case__ ( self, __a):
        '''Process one audio sample; this stub always returns silence (0.0).'''
        return 0.0
def A ( fft_results , samplerate ):
    """Return display bounds ``(lowest, highest)`` in dB for a magnitude
    spectrum, clamped to at most -20 for the lower and at least 20 for the
    upper bound. Only the positive-frequency bins (excluding DC and Nyquist
    edges) are considered.

    Bug fix: both parameters shared one garbled name (a SyntaxError); names
    restored from the plotting helpers that call this.
    """
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
    return lowest, highest
def A ( filter_type , samplerate ):
    """Plot the frequency response (gain in dB) of ``filter_type`` by feeding
    it a unit impulse, zero-padding to ``samplerate`` samples and taking the
    FFT magnitude.

    Bug fixes: both parameters shared one garbled name (a SyntaxError); the
    list comprehension processed the wrong variable instead of each impulse
    sample; ``np.logaa`` is a garbling of ``np.log10`` (dB = 20*log10).
    NOTE(review): ``get_bounds`` is not defined under that name in this chunk
    (the helper above was renamed ``A``) — restore the real name before running.
    """
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs ) )
    fft_db = 20 * np.log10(fft_out )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel("Frequency (Hz)" )
    plt.xscale("log" )
    # Display within reasonable bounds
    bounds = get_bounds(fft_db , samplerate )
    plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
    plt.ylabel("Gain (dB)" )
    plt.plot(fft_db )
    plt.show()
def A ( filter_type , samplerate ):
    """Plot the phase response (radians) of ``filter_type`` by feeding it a
    unit impulse, zero-padding to ``samplerate`` samples and taking the FFT
    phase angle (unwrapped for display).

    Bug fixes: both parameters shared one garbled name (a SyntaxError) and
    the list comprehension processed the wrong variable instead of each
    impulse sample.
    """
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phases = np.angle(np.fft.fft(outputs ) )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel("Frequency (Hz)" )
    plt.xscale("log" )
    plt.ylim(-2 * pi , 2 * pi )
    plt.ylabel("Phase shift (Radians)" )
    plt.plot(np.unwrap(phases , -2 * pi ) )
    plt.show()
| 36
| 0
|
"""simple docstring"""
def UpperCAmelCase ( UpperCamelCase__ ):
    """Return the ``UpperCamelCase__``-bit Gray code sequence as integers.

    Bug fix: the body referenced several undefined names left by automated
    renaming (``bit_count``, ``gray_code_sequence_string``, ``sequence``);
    the string-sequence generator is now a private helper defined alongside
    so the block is self-contained.

    >>> UpperCAmelCase(2)
    [0, 1, 3, 2]
    """
    if UpperCamelCase__ < 0:
        raise ValueError('The given input must be positive' )
    # get the generated string sequence
    sequence = _gray_code_sequence_string(UpperCamelCase__ )
    # convert the bit strings to integers
    return [int(code , 2 ) for code in sequence]


def _gray_code_sequence_string(bit_count ):
    """Recursively build the list of ``bit_count``-bit Gray code strings:
    prefix '0' to the (n-1)-bit sequence, then '1' to its reverse."""
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    smaller_sequence = _gray_code_sequence_string(bit_count - 1 )
    sequence = []
    for code in smaller_sequence:
        sequence.append("0" + code )
    for code in reversed(smaller_sequence ):
        sequence.append("1" + code )
    return sequence
def UpperCAmelCase ( UpperCamelCase__ ):
    """Return the ``UpperCamelCase__``-bit Gray code sequence as bit strings.

    Bug fixes: the body referenced undefined names left by automated renaming
    (``bit_count``, ``seq_len``, ``smaller_sequence``, ``sequence``) and the
    recursive call targeted an undefined ``gray_code_sequence_string`` — the
    function now recurses on itself.
    """
    if UpperCamelCase__ == 0:
        return ["0"]
    if UpperCamelCase__ == 1:
        return ["0", "1"]
    # 1 << n is equivalent to 2^n, the length of the n-bit sequence
    seq_len = 1 << UpperCamelCase__
    # the recursive answer generates the (n-1)-bit sequence
    smaller_sequence = UpperCAmelCase(UpperCamelCase__ - 1 )
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2 ):
        sequence.append("0" + smaller_sequence[i] )
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2 ) ):
        sequence.append("1" + smaller_sequence[i] )
    return sequence
if __name__ == "__main__":
    # Run the doctests embedded in this module.
    import doctest
    doctest.testmod()
| 221
|
def A ( _lowerCamelCase ):
    """Return the ``_lowerCamelCase``-bit Gray code sequence as integers.

    Bug fix: the body referenced several undefined names left by automated
    renaming (``bit_count``, ``gray_code_sequence_string``, ``sequence``);
    the string-sequence generator is now a private helper defined alongside
    so the block is self-contained.
    """
    if _lowerCamelCase < 0:
        raise ValueError("The given input must be positive" )
    # get the generated string sequence
    sequence = _gray_code_sequence_string(_lowerCamelCase )
    # convert the bit strings to integers
    return [int(code , 2 ) for code in sequence]


def _gray_code_sequence_string(bit_count ):
    """Recursively build the list of ``bit_count``-bit Gray code strings:
    prefix '0' to the (n-1)-bit sequence, then '1' to its reverse."""
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    smaller_sequence = _gray_code_sequence_string(bit_count - 1 )
    sequence = []
    for code in smaller_sequence:
        sequence.append("0" + code )
    for code in reversed(smaller_sequence ):
        sequence.append("1" + code )
    return sequence
def A ( _lowerCamelCase ):
    """Return the ``_lowerCamelCase``-bit Gray code sequence as bit strings.

    Bug fixes: the body referenced undefined names left by automated renaming
    and the recursive call targeted an undefined
    ``gray_code_sequence_string`` — the function now recurses on itself.
    """
    if _lowerCamelCase == 0:
        return ["0"]
    if _lowerCamelCase == 1:
        return ["0", "1"]
    # 1 << n is equivalent to 2^n, the length of the n-bit sequence
    seq_len = 1 << _lowerCamelCase
    # the recursive answer generates the (n-1)-bit sequence
    smaller_sequence = A(_lowerCamelCase - 1 )
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2 ):
        sequence.append("0" + smaller_sequence[i] )
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2 ) ):
        sequence.append("1" + smaller_sequence[i] )
    return sequence
if __name__ == "__main__":
    # Run the doctests embedded in this module.
    import doctest
    doctest.testmod()
| 36
| 0
|
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
__UpperCAmelCase = logging.get_logger(__name__)
# NOTE(review): every constant below is bound to the same name `__UpperCAmelCase`,
# so each assignment clobbers the previous one. Upstream these are distinct
# names (_CONFIG_FOR_DOC, _CHECKPOINT_FOR_DOC, _EXPECTED_OUTPUT_SHAPE,
# _IMAGE_CLASS_CHECKPOINT, _IMAGE_CLASS_EXPECTED_OUTPUT,
# RESNET_PRETRAINED_MODEL_ARCHIVE_LIST) referenced by the decorators further down.
# General docstring
__UpperCAmelCase = '''ResNetConfig'''
# Base docstring
__UpperCAmelCase = '''microsoft/resnet-50'''
__UpperCAmelCase = [1, 2_048, 7, 7]
# Image classification docstring
__UpperCAmelCase = '''microsoft/resnet-50'''
__UpperCAmelCase = '''tiger cat'''
__UpperCAmelCase = [
    '''microsoft/resnet-50''',
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    """Conv2d -> BatchNorm2d -> activation: the basic ResNet building block."""

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, activation="relu"):
        super().__init__()
        # padding = kernel_size // 2 gives "same"-style padding for odd kernels;
        # bias is omitted because BatchNorm supplies the affine shift
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        # ACTaFN maps activation names to modules; None disables the activation
        self.activation = ACTaFN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings(nn.Module):
    """ResNet stem: a 7x7/stride-2 convolution followed by 3x3/stride-2 max-pooling."""

    def __init__(self, config):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # remembered so forward() can validate the input channel count
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.'
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    """1x1 strided projection used when a residual branch changes shape (channels/stride)."""

    def __init__(self, in_channels, out_channels, stride=2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input):
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    """Classic residual block: two 3x3 conv layers plus an (optional) projection shortcut."""

    def __init__(self, in_channels, out_channels, stride=1, activation="relu"):
        super().__init__()
        # the shortcut must project whenever spatial size or channel count changes
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            # no activation on the second conv: it is applied after the residual add
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACTaFN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer(nn.Module):
    """Bottleneck residual block: 1x1 reduce -> 3x3 -> 1x1 expand, plus shortcut.

    The first 1x1 convolution reduces the channel count by ``reduction`` (4 by
    default) so the expensive 3x3 convolution runs on fewer channels.
    """

    def __init__(self, in_channels, out_channels, stride=1, activation="relu", reduction=4):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            # no activation on the last conv: it is applied after the residual add
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACTaFN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage(nn.Module):
    """A ResNet stage: ``depth`` residual blocks, the first one downsampling."""

    def __init__(self, config, in_channels, out_channels, stride=2, depth=2):
        super().__init__()
        # block flavour is chosen once per model from the configuration
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input):
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    """Stack of ResNet stages; optionally collects the hidden state before/after each stage."""

    def __init__(self, config):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        # consecutive (in, out) channel pairs for the remaining stages
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state, output_hidden_states=False, return_dict=True):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                # record the hidden state *entering* each stage
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            # plus the final output
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    """Handles weight initialization and the pretrained download/load interface for ResNet."""

    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        # He initialization for convolutions, constant init for norm layers
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        # the original code assigned `value` to a throwaway local; it must be
        # set on the encoder module for checkpointing to take effect
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
__UpperCAmelCase = r'''\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'''
__UpperCAmelCase = r'''\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'''
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    """Stem + encoder returning the final feature map and a pooled output."""

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        # global average pooling down to a 1x1 map for the pooled output
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality='vision',
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values, output_hidden_states=None, return_dict=None):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    "\n    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ",
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    """ResNet backbone plus a linear classification head over the pooled features."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head; Identity when num_labels == 0 (feature extraction)
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            # infer the problem type once from the label count / dtype, then cache it
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    "\n    ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n    ",
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    """Exposes per-stage ResNet feature maps for detection/segmentation frameworks."""

    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values, output_hidden_states=None, return_dict=None):
        """Return the feature maps of the stages listed in ``config.out_features``."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        embedding_output = self.embedder(pixel_values)
        # all hidden states are always needed internally to select the requested stages
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
        hidden_states = outputs.hidden_states
        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
| 119
|
from PIL import Image
def A ( _lowerCamelCase ):
    """Binarize a grayscale PIL image using its mean intensity as threshold.

    Pixels strictly above the mean become 255, all others 0. The image is
    modified in place and also returned.
    """
    image = _lowerCamelCase
    height, width = image.size
    mean = 0
    pixels = image.load()
    # first pass: accumulate the total intensity, then divide to get the mean
    # (the original code ranged over the image object itself instead of its size)
    for i in range(width):
        for j in range(height):
            mean += pixels[j, i]
    mean //= width * height
    # second pass: threshold every pixel against the mean
    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
    # `A` is the mean-threshold routine defined above; the original script
    # called a nonexistent `mean_threshold` and then saved an undefined `image`.
    image = A(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
| 36
| 0
|
"""simple docstring"""
import datasets
from .evaluate import evaluate
lowerCamelCase_ = '''\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'''
lowerCamelCase_ = '''\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'''
lowerCamelCase_ = '''\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric(\"cuad\")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ (datasets.Metric ):
    """CUAD metric: wraps the official CUAD v1 scoring script.

    `datasets.Metric` dispatches to `_info` / `_compute`, so those hook names
    are restored here (the obfuscated originals were unreachable and had
    duplicate parameter names, which is a SyntaxError).
    """

    def _info(self):
        # Declare citation, description and the feature schema expected by compute().
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string" ),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string" ) ),
                    },
                    "references": {
                        "id": datasets.Value("string" ),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string" ),
                                "answer_start": datasets.Value("int32" ),
                            } ),
                    },
                } ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def _compute(self, predictions, references):
        # Re-shape the flat prediction/reference lists into the nested CUAD
        # dataset format, then defer to the official evaluator.
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 268
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
# NOTE(review): both constants share the name `_snake_case`, so the dict below
# overwrites the logger; upstream this is WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
# a map from checkpoint identifier to its hosted config URL.
_snake_case = {
    "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class UpperCAmelCase_(PretrainedConfig):
    """Configuration class for Wav2Vec2 models.

    The obfuscated original declared every `__init__` parameter as `__a`
    (duplicate argument names are a SyntaxError) while the body referenced
    the canonical names; the canonical signature is restored here with the
    same defaults and order.
    """

    model_type = 'wav2vec2'

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # the three convolutional spec lists must agree in length
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        # overall downsampling factor of the convolutional feature extractor
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 36
| 0
|
import os
def lowerCamelCase_ ( ) -> int:
    """Project Euler 67: maximum top-to-bottom path sum in ``triangle.txt``.

    Reads the triangle file that sits next to this script, accumulates the
    best path sum row by row (dynamic programming), and returns the maximum
    value in the final row.
    """
    # locate triangle.txt relative to this file, not the working directory
    # (the original passed the function's own argument to realpath)
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, 'triangle.txt')
    with open(triangle_path) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(' '):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            # best predecessor: directly above, or above-left (0 off the edges)
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
if __name__ == "__main__":
    # the solver above is (mis)named `lowerCamelCase_`; the original called a
    # nonexistent `solution()`
    print(lowerCamelCase_())
| 90
|
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    'The RoBERTa Model transformer with early exiting (DeeRoBERTa). ',
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    """DeeBERT backbone with RoBERTa embeddings swapped in."""

    config_class = RobertaConfig
    base_model_prefix = 'roberta'

    def __init__(self, config):
        super().__init__(config)
        # replace BERT embeddings with RoBERTa's (the original assigned them
        # to a throwaway local instead of the module attribute)
        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()
@add_start_docstrings(
    'RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n    also takes care of multi-layer training. ',
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    """DeeRoBERTa with a sequence-classification head and highway-exit training."""

    config_class = RobertaConfig
    base_model_prefix = 'roberta'

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers
        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        """Run the full model, or stop at the highway exit raised by the backbone.

        Returns (loss), logits, (hidden_states), (attentions), entropy.
        """
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # an intermediate layer decided to exit early
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]
        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)
            if train_highway:
                # exclude the final highway, of course
                outputs = (sum(highway_losses[:-1]),) + outputs
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                # use the highway of the requested layer instead of the final logits
                outputs = (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
| 36
| 0
|
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)
# Fall back to dummy placeholder objects when the optional torch/transformers
# dependencies (transformers >= 4.25.0) are missing, so importing this package
# never hard-fails at import time.
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
| 340
|
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
_snake_case = logging.get_logger(__name__)
class VisionEncoderDecoderConfig(PretrainedConfig):
    """Composite configuration holding one encoder config and one decoder config."""

    model_type = 'vision-encoder-decoder'
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # both sub-configurations are mandatory
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuraton of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}")
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")
        # re-hydrate the nested dicts into concrete config objects
        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs):
        """Build a composite config from two sub-configs, marking the decoder as such."""
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize including the nested encoder/decoder dicts and the model type."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    """ONNX export config for the vision encoder half."""

    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self):
        # NCHW pixel input with every axis dynamic
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self):
        # vision encoders tolerate a slightly larger ONNX/torch divergence
        return 1E-4

    @property
    def outputs(self):
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})
class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    """ONNX export config for the text decoder half."""

    @property
    def inputs(self):
        # the original code assigned each axis mapping to a throwaway local;
        # they must be keyed into the returned OrderedDict
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
        """Build dummy decoder inputs plus zeroed encoder hidden states for tracing."""
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)
        return common_inputs
class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    """Composite ONNX config that hands out per-half export configs."""

    @property
    def inputs(self):
        # the composite config itself is never exported directly
        pass

    def get_encoder_config(self, encoder_config):
        """Return the ONNX config for the encoder half."""
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(self, encoder_config, decoder_config, feature="default"):
        """Return the ONNX config for the decoder half.

        The decoder needs the encoder's hidden size; the original code assigned
        it to a throwaway local instead of setting it on `decoder_config`.
        """
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
| 36
| 0
|
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : int = logging.get_logger(__name__)
# NOTE(review): both constants share the name `snake_case__`, so the dict below
# overwrites the logger; upstream this is the pretrained-config archive map
# from checkpoint identifier to its hosted config URL.
snake_case__ : int = {
    'huggingface/time-series-transformer-tourism-monthly': (
        'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class A_ ( _lowerCamelCase ):
    """Configuration for a Time Series Transformer model.

    Stores the hyper-parameters used to instantiate the model. Defaults match
    the original block's default values.

    Fixes relative to the previous copy: the ``__init__`` signature repeated a
    single obfuscated parameter name (a SyntaxError), hyper-parameters were
    bound to throwaway locals instead of ``self`` attributes, and the
    ``_number_of_features`` property had lost its name even though ``__init__``
    reads ``self._number_of_features``.
    """

    # NOTE(review): both class attributes share the obfuscated name
    # `lowerCAmelCase__` (upstream: `model_type` and `attribute_map`); the
    # second assignment shadows the first. Kept as-is for interface parity.
    lowerCAmelCase__ = """time_series_transformer"""
    lowerCAmelCase__ = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
        """num_hidden_layers""": """encoder_layers""",
    }

    def __init__(
        self,
        prediction_length=None,
        context_length=None,
        distribution_output="student_t",
        loss="nll",
        input_size=1,
        lags_sequence=[1, 2, 3, 4, 5, 6, 7],  # mutable default kept for API parity
        scaling="mean",
        num_dynamic_real_features=0,
        num_static_categorical_features=0,
        num_static_real_features=0,
        num_time_features=0,
        cardinality=None,
        embedding_dimension=None,
        encoder_ffn_dim=32,
        decoder_ffn_dim=32,
        encoder_attention_heads=2,
        decoder_attention_heads=2,
        encoder_layers=2,
        decoder_layers=2,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=64,
        dropout=0.1,
        encoder_layerdrop=0.1,
        decoder_layerdrop=0.1,
        attention_dropout=0.1,
        activation_dropout=0.1,
        num_parallel_samples=100,
        init_std=0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        # default the context window to the prediction window when unset
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    '''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
            self.embedding_dimension = embedding_dimension
        else:
            # heuristic embedding size per categorical feature
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        """Number of extra input features fed alongside the lagged values."""
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 117
|
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class UpperCAmelCase_ ( a):
    """Descriptor mimicking ``@property`` with caching: the wrapped getter runs
    once per instance and the result is stored on the instance.

    Fixes relative to the previous copy: ``__get__`` repeated the parameter
    name ``__a`` (a SyntaxError) and the body read ``obj``/``attr``/``cached``
    names that were never bound.
    """

    def __get__(self, obj, objtype=None):
        # Accessed on the class itself: return the descriptor, like `property`.
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            # First access: compute and memoize on the instance.
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def A ( _lowerCamelCase ):
    """Convert a truthy/falsy string to 1/0 (distutils ``strtobool`` semantics).

    Fix: the previous copy lowered the input into a throwaway local and then
    read an undefined ``val``.

    Raises:
        ValueError: when the string is neither a truthy nor a falsy value.
    """
    val = _lowerCamelCase.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(F"invalid truth value {val!r}" )
def A ( _lowerCamelCase ):
    """Return True if the argument is a tensor of any available framework
    (torch fx proxy, torch.Tensor, tf.Tensor, jax array/tracer) or a numpy
    ndarray.

    The framework imports are deliberately conditional and ordered — each is
    attempted only when the corresponding `is_*_available()` guard passes.
    """
    if is_torch_fx_proxy(_lowerCamelCase ):
        return True
    if is_torch_available():
        import torch

        if isinstance(_lowerCamelCase , torch.Tensor ):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(_lowerCamelCase , tf.Tensor ):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(_lowerCamelCase , (jnp.ndarray, Tracer) ):
            return True
    # fall through: plain numpy arrays also count as tensors
    return isinstance(_lowerCamelCase , np.ndarray )
def A ( _lowerCamelCase ):
    """True when the argument is a numpy ndarray."""
    return isinstance(_lowerCamelCase, np.ndarray)
def A ( _lowerCamelCase ):
    """Public wrapper: tests whether the argument is a numpy ndarray."""
    return _is_numpy(_lowerCamelCase)
def A ( _lowerCamelCase ):
    """True when the argument is a torch.Tensor (torch must be importable)."""
    import torch

    return isinstance(_lowerCamelCase, torch.Tensor)
def A ( _lowerCamelCase ):
    """True when torch is installed and the argument is a torch.Tensor."""
    if not is_torch_available():
        return False
    return _is_torch(_lowerCamelCase)
def A ( _lowerCamelCase ):
    """True when the argument is a torch.device (torch must be importable)."""
    import torch

    return isinstance(_lowerCamelCase, torch.device)
def A ( _lowerCamelCase ):
    """True when torch is installed and the argument is a torch.device."""
    if not is_torch_available():
        return False
    return _is_torch_device(_lowerCamelCase)
def A ( _lowerCamelCase ):
    """True when the argument is a torch.dtype, or the name of one (e.g.
    ``"float32"``).

    Fix: the previous copy collapsed two different names into one, producing
    ``isinstance(x, x)`` / ``hasattr(x, x)`` — a TypeError at runtime for
    string inputs.
    """
    import torch

    if isinstance(_lowerCamelCase, str):
        # dtype given by name: resolve it as an attribute of the torch module
        if hasattr(torch, _lowerCamelCase):
            _lowerCamelCase = getattr(torch, _lowerCamelCase)
        else:
            return False
    return isinstance(_lowerCamelCase, torch.dtype)
def A ( _lowerCamelCase ):
    """True when torch is installed and the argument is a torch.dtype (or its name)."""
    if not is_torch_available():
        return False
    return _is_torch_dtype(_lowerCamelCase)
def A ( _lowerCamelCase ):
    """True when the argument is a tf.Tensor (TensorFlow must be importable)."""
    import tensorflow as tf

    return isinstance(_lowerCamelCase, tf.Tensor)
def A ( _lowerCamelCase ):
    """True when TensorFlow is installed and the argument is a tf.Tensor."""
    if not is_tf_available():
        return False
    return _is_tensorflow(_lowerCamelCase)
def A ( _lowerCamelCase ):
    """True for TensorFlow symbolic (graph-mode) tensors."""
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(_lowerCamelCase)
    return type(_lowerCamelCase) == tf.Tensor
def A ( _lowerCamelCase ):
    """True when TensorFlow is installed and the argument is a symbolic tf tensor."""
    if not is_tf_available():
        return False
    return _is_tf_symbolic_tensor(_lowerCamelCase)
def A ( _lowerCamelCase ):
    """True when the argument is a jax array (jax must be importable)."""
    import jax.numpy as jnp  # noqa: F811

    return isinstance(_lowerCamelCase, jnp.ndarray)
def A ( _lowerCamelCase ):
    """True when flax/jax is installed and the argument is a jax array."""
    if not is_flax_available():
        return False
    return _is_jax(_lowerCamelCase)
def A ( _lowerCamelCase ):
    """Recursively convert a nested structure of framework tensors / numpy
    arrays into plain Python lists and scalars.

    Fixes relative to the previous copy: the dict/list branches recursed on the
    *whole* container instead of each value/element (infinite recursion), the
    body read an undefined ``obj``, and the recursive call targeted an
    undefined name.
    """
    if isinstance(_lowerCamelCase, (dict, UserDict)):
        # recurse on the values, keep the keys
        return {k: A(v) for k, v in _lowerCamelCase.items()}
    elif isinstance(_lowerCamelCase, (list, tuple)):
        return [A(o) for o in _lowerCamelCase]
    elif is_tf_tensor(_lowerCamelCase):
        return _lowerCamelCase.numpy().tolist()
    elif is_torch_tensor(_lowerCamelCase):
        return _lowerCamelCase.detach().cpu().tolist()
    elif is_jax_tensor(_lowerCamelCase):
        return np.asarray(_lowerCamelCase).tolist()
    elif isinstance(_lowerCamelCase, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return _lowerCamelCase.tolist()
    else:
        return _lowerCamelCase
def A ( _lowerCamelCase ):
    """Recursively convert a nested structure of framework tensors into numpy
    arrays.

    Fixes relative to the previous copy: the dict branch recursed on the whole
    container instead of each value, the body read an undefined ``obj``, and
    the recursive call targeted an undefined name.
    """
    if isinstance(_lowerCamelCase, (dict, UserDict)):
        return {k: A(v) for k, v in _lowerCamelCase.items()}
    elif isinstance(_lowerCamelCase, (list, tuple)):
        return np.array(_lowerCamelCase)
    elif is_tf_tensor(_lowerCamelCase):
        return _lowerCamelCase.numpy()
    elif is_torch_tensor(_lowerCamelCase):
        return _lowerCamelCase.detach().cpu().numpy()
    elif is_jax_tensor(_lowerCamelCase):
        return np.asarray(_lowerCamelCase)
    else:
        return _lowerCamelCase
class UpperCAmelCase_ ( a):
    # Dict-like container for model outputs (upstream: `ModelOutput`).
    # NOTE(review): this copy is heavily machine-obfuscated — locals such as
    # `class_fields`, `first_field`, `first_field_iterator`, `iterator`,
    # `inner_dict`, `k`, `name`, `value` are read without ever being bound under
    # those names, and several signatures repeat the parameter `__a`. Verify
    # every method against the upstream `ModelOutput` implementation.
    def snake_case__ ( self):
        """Post-init hook: validates the dataclass fields and, when only the
        first field is set, unpacks it (dict or (key, value) iterator) into the
        mapping; otherwise copies all non-None fields in."""
        _lowerCAmelCase : Tuple = fields(self)
        # Safety and consistency checks
        if not len(__a):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")
        _lowerCAmelCase : Dict = getattr(self, class_fields[0].name)
        _lowerCAmelCase : str = all(getattr(self, field.name) is None for field in class_fields[1:])
        if other_fields_are_none and not is_tensor(__a):
            if isinstance(__a, __a):
                _lowerCAmelCase : Tuple = first_field.items()
                _lowerCAmelCase : Dict = True
            else:
                try:
                    _lowerCAmelCase : Dict = iter(__a)
                    _lowerCAmelCase : Any = True
                except TypeError:
                    _lowerCAmelCase : Any = False
            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(__a):
                    if (
                        not isinstance(__a, (list, tuple))
                        or not len(__a) == 2
                        or not isinstance(element[0], __a)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            _lowerCAmelCase : Any = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value).")
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        _lowerCAmelCase : Any = element[1]
            elif first_field is not None:
                _lowerCAmelCase : Any = first_field
        else:
            for field in class_fields:
                _lowerCAmelCase : Dict = getattr(self, field.name)
                if v is not None:
                    _lowerCAmelCase : Union[str, Any] = v
    def __delitem__( self, *__a, **__a):
        """Disabled: keys of a model-output container are fixed."""
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")
    def snake_case__ ( self, *__a, **__a):
        """Disabled: ``setdefault`` is not supported on this container."""
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")
    def snake_case__ ( self, *__a, **__a):
        """Disabled: ``pop`` is not supported on this container."""
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")
    def snake_case__ ( self, *__a, **__a):
        """Disabled: ``update`` is not supported on this container."""
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")
    def __getitem__( self, __a):
        """String keys do a dict lookup; integer/slice keys index the tuple view."""
        if isinstance(__a, __a):
            _lowerCAmelCase : Optional[int] = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]
    def __setattr__( self, __a, __a):
        """Mirror attribute writes into the dict entries (attribute -> item)."""
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(__a, __a)
        super().__setattr__(__a, __a)
    def __setitem__( self, __a, __a):
        """Mirror dict writes into the attributes (item -> attribute)."""
        super().__setitem__(__a, __a)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(__a, __a)
    def snake_case__ ( self):
        """Return the stored values as a plain tuple, in key order."""
        return tuple(self[k] for k in self.keys())
class UpperCAmelCase_ ( a , a):
    """Enum base that raises a helpful ValueError listing the valid values when
    an unknown value is looked up (via Enum's ``_missing_`` hook).

    Fixes relative to the previous copy: the member map attribute is
    ``_value2member_map_`` (was misspelled ``_valueamember_map_``) and the
    error message now interpolates the looked-up value (previously an
    undefined name).
    NOTE(review): the base-class list repeats `a` (obfuscated; upstream bases
    are `(str, Enum)`), which is kept for interface parity.
    """

    @classmethod
    def snake_case__ ( cls, __a):
        # Enum calls this hook when `cls(value)` finds no matching member.
        raise ValueError(
            f"{__a} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}")
class UpperCAmelCase_ ( a):
    # Enum-like holder of padding strategies ('longest' / 'max_length' /
    # 'do_not_pad').
    # NOTE(review): all three members share the obfuscated name
    # `lowerCamelCase__`, so only the last assignment survives — the member
    # names look corrupted (upstream: LONGEST / MAX_LENGTH / DO_NOT_PAD).
    lowerCamelCase__ = 'longest'
    lowerCamelCase__ = 'max_length'
    lowerCamelCase__ = 'do_not_pad'
class UpperCAmelCase_ ( a):
    # Enum-like holder of tensor return types ('pt' / 'tf' / 'np' / 'jax').
    # NOTE(review): all four members share the obfuscated name
    # `lowerCamelCase__`, so only the last assignment survives — the member
    # names look corrupted (upstream: PYTORCH / TENSORFLOW / NUMPY / JAX).
    lowerCamelCase__ = 'pt'
    lowerCamelCase__ = 'tf'
    lowerCamelCase__ = 'np'
    lowerCamelCase__ = 'jax'
class UpperCAmelCase_ :
    """Bundle several context managers so they can be entered and exited as one
    unit: entered in order, exited in reverse via an ``ExitStack``.

    Fixes relative to the previous copy: ``__init__`` bound its state to
    throwaway locals instead of ``self`` attributes (so ``__enter__`` raised
    AttributeError), and ``__exit__`` repeated the parameter name ``__a``
    (a SyntaxError).
    """

    def __init__(self, __a):
        # __a: iterable of context managers to manage together
        self.context_managers = __a
        self.stack = ExitStack()

    def __enter__(self):
        # enter each manager; the ExitStack guarantees reverse-order cleanup
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
def A ( _lowerCamelCase ):
    """Whether the model class's forward/call signature accepts
    ``return_loss`` with a default of True.

    Fix: the previous copy assigned ``framework``/``signature`` to throwaway
    locals and then read the undefined names (NameError on every call).
    """
    framework = infer_framework(_lowerCamelCase)
    if framework == "tf":
        signature = inspect.signature(_lowerCamelCase.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(_lowerCamelCase.forward)  # PyTorch models
    else:
        signature = inspect.signature(_lowerCamelCase.__call__)  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def A ( _lowerCamelCase ):
    """Return the label-argument names accepted by the model class's
    forward/call signature (plus span positions for QuestionAnswering models).

    Fix: the previous copy assigned ``model_name``/``framework``/``signature``
    to throwaway locals and then read the undefined names.
    """
    model_name = _lowerCamelCase.__name__
    framework = infer_framework(_lowerCamelCase)
    if framework == "tf":
        signature = inspect.signature(_lowerCamelCase.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(_lowerCamelCase.forward)  # PyTorch models
    else:
        signature = inspect.signature(_lowerCamelCase.__call__)  # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def A ( d , parent_key = "" , delimiter = "." ):
    """Flatten a nested mapping into a single-level dict, joining nested keys
    with *delimiter* (optionally under a *parent_key* prefix).

    Fixes relative to the previous copy: the signature repeated one parameter
    name (a SyntaxError), the joined key was assigned to a throwaway local
    while an undefined ``key`` was yielded, and the recursion targeted an
    undefined name.
    """

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            # prefix nested keys with the accumulated parent path
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from A(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
@contextmanager
def A ( working_dir , use_temp_dir = False ):
    """Context manager yielding *working_dir*, or a fresh temporary directory
    (cleaned up on exit) when *use_temp_dir* is True.

    Fix: the previous copy repeated a single parameter name (a SyntaxError)
    and read the undefined names ``use_temp_dir``/``working_dir``.
    """
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def A ( array , axes=None ):
    """Framework-agnostic transpose for numpy / torch / tf / jax arrays.

    Fix: the previous copy repeated a single parameter name (a SyntaxError)
    and read the undefined names ``array``/``axes``.
    """
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        # torch uses `.T` for a full transpose and `permute` for explicit axes
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(F"Type not supported for transpose: {type(array)}." )
def A ( array , newshape ):
    """Framework-agnostic reshape for numpy / torch / tf / jax arrays.

    Fix: the previous copy repeated a single parameter name (a SyntaxError)
    and read the undefined name ``array``.
    """
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(F"Type not supported for reshape: {type(array)}." )
def A ( array , axis=None ):
    """Framework-agnostic squeeze for numpy / torch / tf / jax arrays.

    Fix: the previous copy repeated a single parameter name (a SyntaxError)
    and read the undefined names ``array``/``axis``.
    """
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        # torch calls the axis argument `dim`
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(F"Type not supported for squeeze: {type(array)}." )
def A ( array , axis ):
    """Framework-agnostic expand_dims for numpy / torch / tf / jax arrays.

    Fix: the previous copy repeated a single parameter name (a SyntaxError)
    and read the undefined names ``array``/``axis``.
    """
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        # torch calls the axis argument `dim`
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(F"Type not supported for expand_dims: {type(array)}." )
def A ( array ):
    """Total number of elements of a numpy / torch / tf / jax array.

    Fixes relative to the previous copy: the body read the undefined name
    ``array``, and the error message referenced ``expand_dims`` (copy-paste).
    """
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(F"Type not supported for tensor_size: {type(array)}." )
def A ( auto_map , repo_id ):
    """Namespace every entry of *auto_map* with ``repo_id--`` (in place; the
    dict is also returned). Entries that are None or already namespaced
    (contain ``--``) are left untouched.

    Fixes relative to the previous copy: the signature repeated one parameter
    name (a SyntaxError) and the rewritten values were assigned to a throwaway
    local, so the map was returned unchanged.
    """
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [F"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = F"{repo_id}--{value}"
    return auto_map
def A ( _lowerCamelCase ):
    """Infer the framework ("tf" / "pt" / "flax") of a model class from its MRO.

    The decision is made on the first base class in the MRO: its module prefix
    (tensorflow/keras, torch, flax/jax) or a well-known base-class name.

    Fixes relative to the previous copy: ``module``/``name`` were assigned to
    throwaway locals and then read as undefined names, and the error message
    interpolated the undefined ``model_class`` instead of the parameter.

    Raises:
        TypeError: when the class matches no known framework.
    """
    for base_class in inspect.getmro(_lowerCamelCase):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
        else:
            raise TypeError(F"Could not infer framework from class {_lowerCamelCase}." )
| 36
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger and BEiT pretrained-config archive map.
# NOTE(review): both constants share the obfuscated name `UpperCAmelCase`, so
# the second assignment shadows the first (a logger is replaced by the dict) —
# an artifact of the obfuscation; the `: Dict` annotation is wrong for the
# logger value.
UpperCAmelCase : Dict =logging.get_logger(__name__)
UpperCAmelCase : Dict ={
    """microsoft/beit-base-patch16-224-pt22k""": (
        """https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"""
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}
class _lowercase (a_ ):
    """Configuration class for BEiT models.

    Stores the hyper-parameters used to instantiate the model; defaults match
    the original block's default values.

    Fixes relative to the previous copy: the ``__init__`` signature repeated a
    single obfuscated parameter name (a SyntaxError) and hyper-parameters were
    bound to throwaway locals instead of ``self`` attributes.
    """

    lowercase__ = """beit"""

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],  # mutable defaults kept for API parity
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class _lowercase (a_ ):
    """ONNX export configuration for BEiT (minimum opset declared as 1.11)."""

    lowercase__ = version.parse("""1.11""" )

    @property
    def _lowerCamelCase ( self ):
        """Dynamic-axis spec for the single `pixel_values` input."""
        dynamic_axes = {0: "batch", 1: "num_channels", 2: "height", 3: "width"}
        return OrderedDict([("pixel_values", dynamic_axes)])

    @property
    def _lowerCamelCase ( self ):
        """Absolute tolerance used when validating exported model outputs."""
        return 1e-4
| 128
|
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected" , [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10 )]),
        # when jobs >= shards, each job receives exactly one shard
        # (fixed: the comprehension previously referenced an undefined name)
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i , i + 1 ) for i in range(10 )]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1 )]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
    ] , )
def A ( kwargs , expected ):
    """_distribute_shards must split `num_shards` into at most `max_num_jobs`
    contiguous, balanced ranges. (Fixed: the signature previously repeated a
    single parameter name.)"""
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected" , [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ] , )
def A ( gen_kwargs , max_num_jobs , expected ):
    """_split_gen_kwargs must split the shard list across at most
    `max_num_jobs` kwarg dicts. (Fixed: the signature previously repeated a
    single parameter name.)"""
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, expected" , [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ] , )
def A ( gen_kwargs , expected ):
    """_number_of_shards_in_gen_kwargs must count the shards, and raise when
    two shard lists of different lengths are mixed. (Fixed: the signature
    previously repeated a single parameter name.)"""
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
| 36
| 0
|
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowercase : List[str] = """platform"""
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class __snake_case :
    # Flax Pegasus model tester: builds a tiny config plus dummy inputs, and
    # checks that incremental (cached) decoding matches full decoding.
    # NOTE(review): this copy is machine-obfuscated — the __init__ and checker
    # signatures repeat the parameter name `snake_case`, and the bodies read
    # names (`parent`, `batch_size`, `config`, `inputs_dict`,
    # `model_class_name`, `max_decoder_length`, ...) that are never bound under
    # those names here. Verify every method against the upstream test file.
    _a : Any= PegasusConfig
    _a : Dict= {}
    _a : List[Any]= "gelu"
    def __init__( self ,snake_case ,snake_case=13 ,snake_case=7 ,snake_case=True ,snake_case=False ,snake_case=99 ,snake_case=32 ,snake_case=5 ,snake_case=4 ,snake_case=37 ,snake_case=0.1 ,snake_case=0.1 ,snake_case=20 ,snake_case=2 ,snake_case=1 ,snake_case=0 ,):
        """Store the tiny-model hyper-parameters used to build test configs."""
        lowercase : Optional[int] = parent
        lowercase : int = batch_size
        lowercase : int = seq_length
        lowercase : Dict = is_training
        lowercase : List[str] = use_labels
        lowercase : int = vocab_size
        lowercase : Tuple = hidden_size
        lowercase : Dict = num_hidden_layers
        lowercase : Dict = num_attention_heads
        lowercase : int = intermediate_size
        lowercase : str = hidden_dropout_prob
        lowercase : List[Any] = attention_probs_dropout_prob
        lowercase : Any = max_position_embeddings
        lowercase : List[str] = eos_token_id
        lowercase : Any = pad_token_id
        lowercase : Optional[Any] = bos_token_id
    def _SCREAMING_SNAKE_CASE ( self ):
        """Build a tiny Pegasus config together with random dummy inputs."""
        lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size ).clip(3 ,self.vocab_size )
        lowercase : str = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) ,1 )
        lowercase : Optional[Any] = np.concatenate([input_ids, eos_tensor] ,axis=1 )
        lowercase : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        lowercase : str = self.config_cls(
            vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,)
        lowercase : int = prepare_pegasus_inputs_dict(__a ,__a ,__a )
        return config, inputs_dict
    def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ):
        """Check that decoding with an init_cache'd past matches full decoding
        (without an explicit decoder attention mask)."""
        lowercase : List[Any] = 20
        lowercase : str = model_class_name(__a )
        lowercase : int = model.encode(inputs_dict["""input_ids"""] )
        lowercase : List[Any] = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        lowercase : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] ,__a ,__a )
        lowercase : List[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) ,dtype="""i4""" )
        lowercase : Dict = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) ,)
        lowercase : Dict = model.decode(
            decoder_input_ids[:, :-1] ,__a ,decoder_attention_mask=__a ,past_key_values=__a ,decoder_position_ids=__a ,)
        lowercase : str = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] ,dtype="""i4""" )
        lowercase : Any = model.decode(
            decoder_input_ids[:, -1:] ,__a ,decoder_attention_mask=__a ,past_key_values=outputs_cache.past_key_values ,decoder_position_ids=__a ,)
        lowercase : List[str] = model.decode(__a ,__a )
        lowercase : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 ,msg=f"Max diff is {diff}" )
    def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ):
        """Same cached-vs-full decoding check, but with an explicit (padded)
        decoder attention mask."""
        lowercase : Dict = 20
        lowercase : Any = model_class_name(__a )
        lowercase : List[str] = model.encode(inputs_dict["""input_ids"""] )
        lowercase : Optional[Any] = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        lowercase : Optional[int] = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] ,axis=-1 ,)
        lowercase : Tuple = model.init_cache(decoder_input_ids.shape[0] ,__a ,__a )
        lowercase : Union[str, Any] = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) ,)
        lowercase : Optional[int] = model.decode(
            decoder_input_ids[:, :-1] ,__a ,decoder_attention_mask=__a ,past_key_values=__a ,decoder_position_ids=__a ,)
        lowercase : str = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] ,dtype="""i4""" )
        lowercase : Union[str, Any] = model.decode(
            decoder_input_ids[:, -1:] ,__a ,past_key_values=outputs_cache.past_key_values ,decoder_attention_mask=__a ,decoder_position_ids=__a ,)
        lowercase : Optional[int] = model.decode(__a ,__a ,decoder_attention_mask=__a )
        lowercase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 ,msg=f"Max diff is {diff}" )
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , ) -> Dict:
if attention_mask is None:
lowercase : Tuple = np.not_equal(_lowerCamelCase , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
lowercase : str = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class __snake_case ( lowerCAmelCase , unittest.TestCase ):
_a : int= (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
_a : int= (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
_a : Any= True
_a : Union[str, Any]= False
_a : Optional[Any]= False
_a : str= False
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : int = FlaxPegasusModelTester(self )
lowercase : int = ConfigTester(self ,config_class=__a )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__a ,__a ,__a )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__a ,__a ,__a )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowercase : Dict = self._prepare_for_class(__a ,__a )
lowercase : Optional[int] = model_class(__a )
@jax.jit
def encode_jitted(snake_case ,snake_case=None ,**snake_case ):
return model.encode(input_ids=__a ,attention_mask=__a )
with self.subTest("""JIT Enabled""" ):
lowercase : List[Any] = encode_jitted(**__a ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
lowercase : Optional[int] = encode_jitted(**__a ).to_tuple()
self.assertEqual(len(__a ) ,len(__a ) )
for jitted_output, output in zip(__a ,__a ):
self.assertEqual(jitted_output.shape ,output.shape )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowercase : List[str] = model_class(__a )
lowercase : Optional[int] = model.encode(inputs_dict["""input_ids"""] ,inputs_dict["""attention_mask"""] )
lowercase : str = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(snake_case ,snake_case ,snake_case ):
return model.decode(
decoder_input_ids=__a ,decoder_attention_mask=__a ,encoder_outputs=__a ,)
with self.subTest("""JIT Enabled""" ):
lowercase : str = decode_jitted(**__a ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
lowercase : str = decode_jitted(**__a ).to_tuple()
self.assertEqual(len(__a ) ,len(__a ) )
for jitted_output, output in zip(__a ,__a ):
self.assertEqual(jitted_output.shape ,output.shape )
@slow
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowercase : Optional[int] = model_class_name.from_pretrained("""google/pegasus-large""" ,from_pt=__a )
lowercase : Dict = np.ones((1, 1) )
lowercase : List[str] = model(__a )
self.assertIsNotNone(__a )
@slow
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[Any] = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
lowercase : Any = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
lowercase : Optional[int] = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
lowercase : List[Any] = [
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
lowercase : int = tokenizer(__a ,return_tensors="""np""" ,truncation=__a ,max_length=512 ,padding=__a )
lowercase : Union[str, Any] = model.generate(**__a ,num_beams=2 ).sequences
lowercase : Union[str, Any] = tokenizer.batch_decode(__a ,skip_special_tokens=__a )
assert tgt_text == decoded
| 20
|
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """Differentiable stand-in for the CLIP processor.

    Text goes through the regular CLIP tokenizer, but images are kept as torch
    tensors and resized/cropped/normalized with torchvision transforms so
    gradients can flow through the preprocessing step.

    Renamed to match the call site below (``ProcessorGradientFlow(device=self.device)``);
    the obfuscated ``__init__`` had duplicate parameter names (a SyntaxError) and had
    lost the ``self.*`` assignments that the other methods read.
    """

    def __init__(self, device="cpu", clip_model="openai/clip-vit-large-patch14"):
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        # CLIP's published image-normalization statistics (RGB channel means/stds).
        self.image_mean = [0.48_145_466, 0.4_578_275, 0.40_821_073]
        self.image_std = [0.26_862_954, 0.26_130_258, 0.27_577_711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        """Resize, center-crop and normalize a batch of image tensors (autograd-safe)."""
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        """Tokenize `text`, preprocess `images`, and move every tensor to `self.device`."""
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class UpperCAmelCase_(nn.Module):
    """Edits images by optimizing a VQGAN latent offset under a CLIP prompt-similarity loss.

    NOTE(review): upstream this class appears as ``VQGAN_CLIP``; the class name is kept
    because no external caller is visible from this file. The obfuscated version had
    duplicate parameter names in every method (a SyntaxError), all methods collapsed
    onto a single name, and lost ``self.*`` assignments; names below are restored from
    the internal call sites (``self._get_latent``, ``self._add_vector``, ``self.save_path``, ...).
    """

    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ):
        """Load default VQGAN/CLIP models when none are given and store the run settings."""
        super().__init__()
        self.latent = None  # set by generate() before optimization starts
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)
        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape

    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        """Assemble the saved intermediate .png frames into an animated gif."""
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)")
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            # Hold the first and last frames longer so the loop reads clearly.
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")

    def _get_latent(self, path=None, img=None):
        """Encode the image at `path` into a VQGAN latent (tensor input not implemented)."""
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        img = preprocess(Image.open(path), target_image_size=256).to(self.device)
        img = preprocess_vqgan(img)
        z, *_ = self.vqgan.encode(img)
        return z

    def _add_vector(self, transform_vector):
        """Add `transform_vector` to the base latent and decode the resulting image."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)

    def _get_clip_similarity(self, prompts, image, weights=None):
        """Return the (optionally weighted) sum of CLIP image-text similarity logits."""
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        """Loss = -log(similarity to positives) + log(similarity to negatives)."""
        # Reciprocal weights on positives mirror the original implementation — TODO confirm intent.
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss

    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        """Optimize a random latent offset for `self.iterations` steps, yielding each frame."""
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)

        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector

    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        """Start a wandb run and record prompts, hyper-parameters and the source image."""
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            # wandb.log expects a dict payload; the original passed a bare string (runtime error).
            wandb.log({"Original Image": wandb.Image(image)})

    def process_prompts(self, prompts):
        """Normalize prompts ("a|b", [(text, w)], "text:w", plain strings) into
        {"prompts": [...], "weights": tensor} with a default weight of 1.0."""
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }

    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        """Run prompt-guided latent optimization, optionally showing/saving each frame.

        Starts from the latent of `image_path` when given, otherwise from random noise.
        At least one positive prompt is required.
        """
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                # Directory for these prompts already exists: make a timestamped sibling.
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
| 36
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a__ : List[Any] = {'''tokenization_wav2vec2_phoneme''': ['''Wav2Vec2PhonemeCTCTokenizer''']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
a__ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 54
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
_snake_case = get_tests_dir("fixtures")
class ImageProcessorUtilTester(unittest.TestCase):
    """Offline/caching behavior of image-processor loading.

    The obfuscated version named all three tests identically (so only the last
    survived, and none started with ``test_`` for unittest discovery) and passed
    the undefined ``__a`` to ``assertRaises``.
    """

    def test_cached_files_are_used_when_internet_is_down(self):
        """A cached processor must load even when every HTTP request returns 500."""
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        """Loading directly from a config URL (deprecated path) must still work."""
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json")

    def test_image_processor_from_pretrained_subfolder(self):
        """A config stored in a repo subfolder requires the `subfolder` argument."""
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor")
        self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    """Round-trip push/pull of image processors against the staging Hub.

    The obfuscated version lost ``cls._token = TOKEN`` in ``setUpClass`` (every
    test reads ``self._token``), named all five methods identically, and referenced
    the undefined ``__a`` where the fixtures directory (``_snake_case``) belongs.
    """

    @classmethod
    def setUpClass(cls):
        # Authenticate once for the whole class; tests read self._token.
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        # Best-effort cleanup: repos may not exist if a test failed early.
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        """Push under the user namespace, reload, and compare all attributes."""
        image_processor = ViTImageProcessor.from_pretrained(_snake_case)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        """Push under an organization namespace, reload, and compare all attributes."""
        image_processor = ViTImageProcessor.from_pretrained(_snake_case)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        """A custom processor class must round-trip through the hub with its auto_map."""
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(_snake_case)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map, {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"}, )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True)
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
| 36
| 0
|
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Return a random password of `length` characters drawn from letters, digits and punctuation.

    Uses `secrets.choice` (cryptographically secure) rather than `random`.
    The obfuscated version referenced the undefined `_lowerCamelCase` and declared
    a wrong return annotation.
    """
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))
def alternative_password_generator(chars_incl: str, i: int) -> str:
    """Build an `i`-character password guaranteed to contain `chars_incl`.

    The remaining length is split roughly in thirds between letters, digits and
    punctuation, then the whole pool is shuffled. The obfuscated version had
    duplicate parameter names (a SyntaxError) and undefined argument placeholders;
    the character classes are restored from the inline comment below.
    """
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)
# random is a generalised function for letters, characters and numbers
# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    """Return `i` characters chosen uniformly (crypto-secure) from `chars_incl`.

    The obfuscated version declared two parameters with the same name (a SyntaxError);
    the name `random` is what the callers in this file use.
    """
    return "".join(secrets.choice(chars_incl) for _ in range(i))
def random_number(chars_incl, i):
    """Placeholder: should return `i` random digit characters (not implemented).

    Name taken from the commented-out call in alternative_password_generator;
    the obfuscated def had duplicate parameter names (a SyntaxError).
    """
    pass  # Put your code here...
def random_letters(chars_incl, i):
    """Placeholder: should return `i` random letters (not implemented).

    Name taken from the commented-out call in alternative_password_generator;
    the obfuscated def had duplicate parameter names (a SyntaxError).
    """
    pass  # Put your code here...
def random_characters(chars_incl, i):
    """Placeholder: should return `i` random punctuation characters (not implemented).

    Name taken from the commented-out call in alternative_password_generator;
    the obfuscated def had duplicate parameter names (a SyntaxError).
    """
    pass  # Put your code here...
def is_strong_password(password: str, min_length: int = 8) -> bool:
    """Return True when `password` is at least `min_length` characters long and
    contains an uppercase letter, a lowercase letter, a digit and a special character.

    The obfuscated def had duplicate parameter names (a SyntaxError) and a wrong
    `-> str` annotation.
    """
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowerase
# numbers, and special characters
def main() -> None:
    """Interactive demo: prompt for a length and required characters, then print passwords.

    The obfuscated version collapsed all locals onto the undefined `_lowerCamelCase`;
    names restored from the prompts and the functions the script calls.
    """
    max_length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input(
        "Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(max_length))
    print(
        "Alternative Password generated:", alternative_password_generator(chars_incl, max_length), )
    print("[If you are thinking of using this passsword, You better save it.]")
if __name__ == "__main__":
main()
| 241
|
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    """Builds a small random LiLT config plus inputs and checks model output shapes.

    Renamed to match the visible call site ``LiltModelTester(self)`` in the test class
    below; the obfuscated version had duplicate parameter names (a SyntaxError) and
    had destroyed the bbox coordinate-swap logic.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        """Create random ids/bboxes/masks/labels consistent with the tester settings."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal: coordinates must satisfy x0 <= x1 and y0 <= y1.
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        """Build a small LiltConfig from the tester's hyper-parameters."""
        return LiltConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )

    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        """Run the base model with several input combinations and check output shapes."""
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        """Token-classification head: logits must be (batch, seq, num_labels)."""
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        """QA head: start/end logits must be (batch, seq)."""
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the common ModelTesterMixin machinery: (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Standard-suite unit tests for the LiLT model family.

    The obfuscated version collapsed the four class attributes onto one name
    (the mixins require ``all_model_classes``/``pipeline_model_mapping``) and gave
    every method the same non-``test_`` name, so unittest discovered nothing.
    """

    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # LiLT requires bounding boxes, which the generic pipeline tests do not supply.
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            # config is the first element returned by prepare_config_and_inputs()
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    """End-to-end regression test of the released LiLT checkpoint against reference values."""

    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]], device=torch_device, )

        # Was `assertTrue(shape, expected)` — assertTrue treats the second argument as a
        # message and always passes for a non-empty shape; assertEqual performs the real check.
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1E-3))
| 36
| 0
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
_snake_case = logging.get_logger(__name__)
class a__ ( lowerCamelCase_ ):
_SCREAMING_SNAKE_CASE : Dict = ['input_features', 'attention_mask']
def __init__( self , _UpperCamelCase=80 , _UpperCamelCase=16000 , _UpperCamelCase=0.0 , _UpperCamelCase=10 , _UpperCamelCase=25 , _UpperCamelCase="hamming_window" , _UpperCamelCase=32768.0 , _UpperCamelCase=0.9_7 , _UpperCamelCase=1.0 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=False , **_UpperCamelCase , ):
"""simple docstring"""
super().__init__(feature_size=__a , sampling_rate=__a , padding_value=__a , **__a )
_lowercase : int = feature_size
_lowercase : Optional[Any] = sampling_rate
_lowercase : Tuple = padding_value
_lowercase : int = hop_length
_lowercase : str = win_length
_lowercase : str = frame_signal_scale
_lowercase : Union[str, Any] = preemphasis_coeff
_lowercase : Optional[Any] = mel_floor
_lowercase : str = normalize_means
_lowercase : Union[str, Any] = normalize_vars
_lowercase : List[str] = win_function
_lowercase : str = return_attention_mask
_lowercase : str = win_length * sampling_rate // 1000
_lowercase : Union[str, Any] = hop_length * sampling_rate // 1000
_lowercase : Tuple = optimal_fft_length(self.sample_size )
_lowercase : Tuple = (self.n_fft // 2) + 1
def _lowerCamelCase ( self , _UpperCamelCase ):
"""simple docstring"""
if self.win_function == "hamming_window":
_lowercase : str = window_function(window_length=self.sample_size , name=self.win_function , periodic=__a )
else:
_lowercase : Union[str, Any] = window_function(window_length=self.sample_size , name=self.win_function )
_lowercase : List[Any] = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
_lowercase : Optional[Any] = spectrogram(
one_waveform * self.frame_signal_scale , window=__a , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=__a , preemphasis=self.preemphasis_coeff , mel_filters=__a , mel_floor=self.mel_floor , log_mel="log" , )
return msfc_features.T
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
if self.normalize_means:
_lowercase : Dict = x[:input_length].mean(axis=0 )
_lowercase : Optional[Any] = np.subtract(__a , __a )
if self.normalize_vars:
_lowercase : List[str] = x[:input_length].std(axis=0 )
_lowercase : Optional[int] = np.divide(__a , __a )
if input_length < x.shape[0]:
_lowercase : Union[str, Any] = padding_value
# make sure array is in float32
_lowercase : Optional[int] = x.astype(np.floataa )
return x
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase = None ):
"""simple docstring"""
_lowercase : int = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(__a , __a , self.padding_value ) for x, n in zip(__a , __a )]
def __call__(
    self,
    raw_speech,
    padding=False,
    max_length=None,
    truncation=False,
    pad_to_multiple_of=None,
    return_attention_mask=None,
    return_tensors=None,
    sampling_rate=None,
    **kwargs,
):
    """Featurize one or more raw waveforms and pad them into a model-ready batch.

    Defect fixed: all eight optional parameters shared one placeholder name
    (a SyntaxError) and every local assignment target was lost; names are
    restored from how the body references them.

    Args:
        raw_speech: a single waveform (1-D array/list) or a batch of waveforms.
        padding / max_length / truncation / pad_to_multiple_of /
        return_attention_mask: forwarded to ``self.pad``.
        return_tensors: optional framework to convert the batch into.
        sampling_rate: sampling rate of `raw_speech`, checked against the model's.

    Returns:
        A padded ``BatchFeature`` with ``input_features`` (and optionally
        ``attention_mask``).
    """
    if sampling_rate is not None:
        if sampling_rate != self.sampling_rate:
            raise ValueError(
                f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
                f''' {self.sampling_rate} and not {sampling_rate}.''')
    else:
        logger.warning(
            "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
            "Failing to do so can result in silent errors that might be hard to debug.")
    is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
    if is_batched_numpy and len(raw_speech.shape) > 2:
        raise ValueError(f'''Only mono-channel audio is supported for input to {self}''')
    is_batched = is_batched_numpy or (
        isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
    )
    if is_batched:
        raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
    elif not is_batched and not isinstance(raw_speech, np.ndarray):
        raw_speech = np.asarray(raw_speech, dtype=np.float32)
    elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
        # downcast double-precision input once, at the boundary
        raw_speech = raw_speech.astype(np.float32)
    # always return batch
    if not is_batched:
        raw_speech = [raw_speech]
    # extract fbank features
    features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]
    # convert into correct format for padding
    encoded_inputs = BatchFeature({"input_features": features})
    padded_inputs = self.pad(
        encoded_inputs,
        padding=padding,
        max_length=max_length,
        truncation=truncation,
        pad_to_multiple_of=pad_to_multiple_of,
        return_attention_mask=return_attention_mask,
        **kwargs,
    )
    # make sure list is in array format
    input_features = padded_inputs.get("input_features")
    if isinstance(input_features[0], list):
        padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
    attention_mask = padded_inputs.get("attention_mask")
    if attention_mask is not None:
        padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
    if self.normalize_means or self.normalize_vars:
        # only use the mask for normalization when padding was actually requested
        attention_mask = (
            np.array(attention_mask, dtype=np.int32)
            if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
            and padding
            else None
        )
        padded_inputs["input_features"] = self.normalize(
            padded_inputs["input_features"], attention_mask=attention_mask)
    if return_tensors is not None:
        padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
    return padded_inputs
| 250
|
import argparse
import copy
def generate_neighbours(path):
    """Parse a whitespace-separated edge list file into an adjacency dict.

    Each line is ``node_a node_b distance``; both directions are recorded.
    Defect fixed: the result dict was assigned to a lost placeholder while the
    body read ``dict_of_neighbours`` (NameError); the function is renamed to
    match its call site. Repeated ``line.split()`` calls are hoisted.

    Returns:
        dict mapping node -> list of [neighbour, distance-string] pairs.
    """
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            a, b, dist = line.split()[:3]
            if a not in dict_of_neighbours:
                dict_of_neighbours[a] = [[b, dist]]
            else:
                dict_of_neighbours[a].append([b, dist])
            if b not in dict_of_neighbours:
                dict_of_neighbours[b] = [[a, dist]]
            else:
                dict_of_neighbours[b].append([a, dist])
    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    """Build a greedy nearest-neighbour tour starting at the file's first node.

    Defect fixed: both parameters shared one placeholder name (a SyntaxError)
    and all locals (start node, current cost, best candidate, ...) were lost;
    names restored from how the body reads them.

    Args:
        path: data file whose first character names the start node.
        dict_of_neighbours: adjacency dict from `generate_neighbours`.

    Returns:
        (tour as list of nodes ending back at the start, total tour distance).
    """
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        # sentinel larger than any real edge; if no unvisited neighbour exists
        # it is added to the distance and compensated for after the loop
        minim = 10_000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node
    first_solution.append(end_node)
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10_000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    """Generate all 2-swap neighbours of `solution`, each with its total distance appended.

    Defects fixed: both parameters shared one placeholder name (a SyntaxError),
    swap-index locals were lost, and the sort lambda declared a placeholder
    parameter while its body used ``x``.

    Returns:
        Neighbour routes (route + [distance]) sorted by ascending distance.
    """
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx_n = solution.index(n)
        for kn in solution[1:-1]:
            idx_kn = solution.index(kn)
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution)
            _tmp[idx_n] = kn
            _tmp[idx_kn] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Run tabu search for `iters` iterations with a tabu list bounded by `size`.

    Defect fixed: all five parameters shared one placeholder name (a SyntaxError)
    and every local target was lost; names restored from body usage. Function
    renamed to match its call sites.

    Returns:
        (best route found, its total distance).
    """
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1
        found = False
        while not found:
            # locate the first position where the candidate differs from the
            # current solution — that pair is the exchange move
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                # strip the appended distance before reusing the route
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                # move is tabu: fall back to the next-best neighbour
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list) >= size:
            tabu_list.pop(0)
        count = count + 1
    return best_solution_ever, best_cost
def main(args=None):
    """Drive the tabu-search pipeline from parsed CLI arguments.

    Defect fixed: the parameter was a placeholder while the body read ``args``;
    renamed to ``main`` to match the call under ``__main__``.
    """
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours)
    best_sol, best_cost = tabu_search(
        first_solution, distance_of_first_solution, dict_of_neighbours, args.Iterations, args.Size,
    )
    print(F"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 36
| 0
|
"""simple docstring"""
def solution(limit=50_000_000):
    """Count numbers below `limit` expressible as prime² + prime³ + prime⁴ (Euler 87).

    Defects fixed: the parameter was declared under a placeholder while the body
    read ``limit``; the result set / sieve step / accumulator were lost targets;
    and the inner loops used ``break`` while iterating unordered ``set`` objects,
    which depends on incidental iteration order — replaced with ``continue`` so
    correctness holds for any ordering. Renamed to match the ``__main__`` call.
    """
    ret = set()
    # 2^3 + 2^4 = 24 is the smallest cube+tetr, so squares above limit-24 can't contribute
    prime_square_limit = int((limit - 24) ** (1 / 2))
    # sieve of Eratosthenes over odd numbers, plus 2
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))
    for primea in primes:
        square = primea * primea
        for primeb in primes:
            cube = primeb * primeb * primeb
            if square + cube >= limit - 16:
                # even the smallest fourth power (2^4 = 16) would overflow the limit
                continue
            for primec in primes:
                tetr = primec * primec * primec * primec
                total = square + cube + tetr
                if total >= limit:
                    continue
                ret.add(total)
    return len(ret)
if __name__ == "__main__":
print(F'''{solution() = }''')
| 221
|
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
class UpperCAmelCase_(TokenizerTesterMixin, unittest.TestCase):
    """Test suite for `BartphoTokenizer`.

    Defects fixed: the mixin base was an undefined placeholder ``a``; the three
    class attributes and four methods all shared duplicated placeholder names,
    so only the last of each survived and the unittest hooks (`setUp`,
    `test_*`) were never invoked. Names restored to the TokenizerTesterMixin
    contract implied by the imported mixin.
    """

    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        """Write a tiny monolingual vocab file and save a tokenizer for the mixin."""
        super().setUp()
        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        # _snake_case is the fixture sentencepiece model path defined at module level
        tokenizer = BartphoTokenizer(_snake_case, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        """Reload the saved tokenizer with the test's special-token overrides."""
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """Provide the mixin with a sample input and its expected decode."""
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Tokenization and token→id conversion on a vocabulary-miss example."""
        tokenizer = BartphoTokenizer(_snake_case, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 36
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__UpperCAmelCase = logging.get_logger(__name__)
class lowerCAmelCase_(BaseImageProcessor):
    """Image processor: optional resize → center-crop → rescale → normalize.

    Defects fixed: every method signature reused one duplicated placeholder
    parameter name (a SyntaxError); all transformation methods shared the name
    ``snake_case_`` so only the last survived; the base class and several
    references were undefined placeholders. Parameter and method names are
    restored from how `preprocess` calls them (`self.resize`,
    `self.center_crop`, `self.rescale`, `self.normalize`).
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BICUBIC,
        do_center_crop=True,
        do_rescale=True,
        rescale_factor=1 / 255,
        crop_size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        # NOTE(review): default_to_square was a lost placeholder; True matches the
        # square 224x224 default — confirm against the original processor config.
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name='crop_size')
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        """Resize `image`; a `shortest_edge` size preserves aspect ratio."""
        size = get_size_dict(size)
        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"""Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}""")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        """Center-crop `image` to an explicit (height, width)."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""")
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """Normalize `image` channel-wise by `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Run the configured pipeline over one image or a batch; per-call
        arguments override the instance defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size', default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        if not is_batched(images):
            images = [images]
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 119
|
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
_snake_case = logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple):
    """Compute an output size rounded to a multiple, optionally preserving aspect ratio.

    Defect fixed: all four parameters shared one placeholder name (a SyntaxError);
    names restored from body usage, and the function renamed to match its call
    site in the image processor below.

    Args:
        input_image: image whose (height, width) is read via `get_image_size`.
        output_size: target size — an int (square) or (height, width) pair.
        keep_aspect_ratio: if True, use one common scale (the one closest to 1).
        multiple: round each output dimension to a multiple of this value.

    Returns:
        (new_height, new_width) tuple.
    """
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        # round to the nearest multiple, then nudge down/up to respect the bounds
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)
    return (new_height, new_width)
class UpperCAmelCase_(BaseImageProcessor):
    """DPT-style image processor: resize (aspect-ratio & multiple aware) →
    rescale → normalize, plus semantic-segmentation post-processing.

    Defects fixed: every signature reused one duplicated placeholder parameter
    name (a SyntaxError); all methods shared the name ``snake_case__`` so only
    the last survived; the base class was an undefined placeholder. Names are
    restored from how `preprocess` calls them (`self.resize`, `self.rescale`,
    `self.normalize`).
    """

    model_input_names = ['pixel_values']

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        keep_aspect_ratio=False,
        ensure_multiple_of=1,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, keep_aspect_ratio=False, ensure_multiple_of=1,
               resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        """Resize `image` to `size`, rounding dims to `ensure_multiple_of`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image, output_size=(size["height"], size["width"]), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of, )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by `scale`."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """Normalize `image` channel-wise by `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        keep_aspect_ratio=None,
        ensure_multiple_of=None,
        resample=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Run the configured pipeline over one image or a batch; per-call
        arguments override the instance defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        # parenthesized: the original relied on `and` binding tighter than `or`,
        # which skipped the size check whenever resample was set
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        """Convert model logits into per-image segmentation maps, optionally
        resized to `target_sizes`."""
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
| 36
| 0
|
"""simple docstring"""
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    """Mock SageMaker launch configuration used by the conversion test below.

    Defects fixed: the base class was an undefined placeholder ``__A`` (should
    be the imported ``SageMakerConfig``); the class name is restored to
    ``MockLaunchConfig``, which the test class references; and every field was
    declared under one duplicated placeholder name, so only the last survived.
    NOTE(review): field names other than the two script-args lists are
    reconstructed from the SageMakerConfig schema — confirm against it.
    """
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = '''ml.p3.2xlarge'''
    iam_role_name = '''accelerate_sagemaker_execution_role'''
    profile = '''hf-sm'''
    region = '''us-east-1'''
    num_machines = 1
    base_job_name = '''accelerate-sagemaker-1'''
    pytorch_version = '''1.6'''
    transformers_version = '''4.4'''
    training_script = '''train.py'''
    # well-formed args: every flag has a value of a parseable type
    success_training_script_args = [
        '''--model_name_or_path''',
        '''bert''',
        '''--do_train''',
        '''False''',
        '''--epochs''',
        '''3''',
        '''--learning_rate''',
        '''5e-5''',
        '''--max_steps''',
        '''50.5''',
    ]
    # malformed args: bare flags mixed with valued flags, expected to raise
    fail_training_script_args = [
        '''--model_name_or_path''',
        '''bert''',
        '''--do_train''',
        '''--do_test''',
        '''False''',
        '''--do_predict''',
        '''--epochs''',
        '''3''',
        '''--learning_rate''',
        '''5e-5''',
        '''--max_steps''',
        '''50.5''',
    ]
class UpperCamelCase_(unittest.TestCase):
    """Checks `_convert_nargs_to_dict` against the mock launch config.

    Defects fixed: the test method was not named ``test_*`` so unittest never
    collected it, and the isinstance / pytest.raises type arguments were
    undefined placeholders — restored from the literal values in
    ``success_training_script_args`` ("bert" → str, "False" → bool, "3" → int,
    "5e-5"/"50.5" → float).
    """

    def test_args_convert(self):
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)
        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
| 268
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def get_config(model_name):
    """Build a `BitConfig` with ImageNet-1k label maps for the given timm model name.

    Defects fixed: the repo/filename locals were lost (the hub download was
    passing the model name twice), the id-map comprehension converted the wrong
    variable, and the function is renamed to match its call site.
    """
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    # JSON keys arrive as strings; the config expects integer class ids
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = "std_conv" if "bit" in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer, num_labels=1_000, id2label=id2label, label2id=label2id, )
    return config
def rename_key(name):
    """Map a timm BiT state-dict key to its HuggingFace equivalent.

    Defect fixed: the parameter was declared under a placeholder while the body
    referenced ``name`` (NameError). Pure string transformation.
    """
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    # anything not already under the bit/classifier namespaces lives in the encoder
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
def prepare_img():
    """Download the standard COCO cats test image used for conversion sanity checks.

    Defect fixed: the URL local was lost and the requests call passed undefined
    placeholders; `stream=True` is required to read `.raw`.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Convert a timm BiT checkpoint to HuggingFace format, verify, and save/push.

    Defects fixed: all three parameters shared placeholder names, every local
    assignment target was lost, and the renamed-key write into the state dict
    was missing entirely (keys were popped but never re-inserted).
    """
    config = get_config(model_name)
    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remapping keys to the HF layout
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        # classifier head weights carry trailing singleton dims in timm
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val
    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # create image processor mirroring the timm eval transform
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits
    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(F"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(F"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(F"ybelkada/{model_name}")
        processor.push_to_hub(F"ybelkada/{model_name}")
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="resnetv2_50x1_bitm",
type=str,
help="Name of the BiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model to the hub.",
)
_snake_case = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 36
| 0
|
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Compute and pickle per-example token lengths for the train and val splits.

    Defects fixed: the first two parameters shared one placeholder name
    (a SyntaxError) and all locals were lost; the function is renamed to match
    the ``fire.Fire(save_len_file)`` entry point.

    Args:
        tokenizer_name: model id/path for `AutoTokenizer.from_pretrained`.
        data_dir: dataset directory passed to `SeqaSeqDataset`.
        max_source_length / max_target_length: truncation lengths for the dataset.
        consider_target: if True, record max(source_len, target_len) per example.
        **kwargs: forwarded to `SeqaSeqDataset`.
    """
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path='train', **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        # one pass over the dataset; pad tokens are excluded from the counts
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path='val', **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
| 90
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class UpperCAmelCase_(BackboneConfigMixin, PretrainedConfig):
    """
    Configuration for a Swin Transformer model (defaults match
    `microsoft/swin-tiny-patch4-window7-224`).

    Fixes: the original declared a duplicate base class (TypeError at class
    creation), bound `model_type` and `attribute_map` to the same class
    attribute name, repeated one scrambled parameter name for every argument
    (a SyntaxError), and assigned to locals instead of `self`.
    """

    model_type = "swin"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],  # list defaults kept for API compatibility; never mutated
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class UpperCAmelCase_(a):
    """ONNX export settings for Swin: one 4-D `pixel_values` input and a 1e-4
    validation tolerance. (Both properties keep the original's shadowed name.)"""

    lowerCamelCase__ = version.parse('1.11')

    @property
    def snake_case__(self):
        """Dynamic-axis specification for the model inputs."""
        dynamic_axes = {0: "batch", 1: "num_channels", 2: "height", 3: "width"}
        return OrderedDict([("pixel_values", dynamic_axes)])

    @property
    def snake_case__(self):
        """Absolute tolerance used when validating the exported model."""
        return 1e-4
| 36
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for Table Transformer. Fixes: the modeling names
# overwrote the import-structure dict with a bare list, the final line read an
# undefined `_import_structure`, and the lazy module was never installed into
# `sys.modules`.
a_ = {
    "configuration_table_transformer": [
        "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TableTransformerConfig",
        "TableTransformerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code is only importable when torch is installed.
    a_["modeling_table_transformer"] = [
        "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TableTransformerForObjectDetection",
        "TableTransformerModel",
        "TableTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so names resolve on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], a_, module_spec=__spec__)
| 340
|
# Gate the Versatile Diffusion pipelines on torch + transformers>=4.25.0; fall
# back to dummy placeholder classes (which raise an informative ImportError on
# use) when the optional dependencies are missing.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Dummy objects stand in for the real pipelines when deps are absent.
    from ...utils.dummy_torch_and_transformers_objects import (
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
    )
else:
    # Real implementations, importable only with torch + a new-enough transformers.
    from .modeling_text_unet import UNetFlatConditionModel
    from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
    from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
    from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 36
| 0
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class A_(BaseOutput):
    """
    Output of a Karras VE scheduler step.

    Fixes: the original bound all three fields to one duplicated class-attribute
    name and used a scrambled, undefined base class; the field names here are
    taken from the `KarrasVeOutput(prev_sample=..., derivative=...,
    pred_original_sample=...)` call sites in the scheduler below, and the base
    from the `BaseOutput` import at the top of this module.

    Attributes:
        prev_sample: sample for the previous timestep.
        derivative: derivative of the predicted original sample.
        pred_original_sample: predicted denoised sample, when computed.
    """

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
class A_ ( _lowerCamelCase , _lowerCamelCase ):
    # Karras et al. (2022)-style variance-expanding stochastic scheduler.
    # NOTE(review): both bases are the scrambled name `_lowerCamelCase`;
    # duplicate bases raise TypeError at class creation — presumably
    # SchedulerMixin and ConfigMixin were intended. Left untouched.
    lowerCAmelCase__ = 2  # solver order (Heun-style 2nd-order correction below)

    @register_to_config
    def __init__(self :int , _UpperCamelCase :Any = 0.0_2 , _UpperCamelCase :List[str] = 100 , _UpperCamelCase :Optional[Any] = 1.0_0_7 , _UpperCamelCase :Tuple = 80 , _UpperCamelCase :Optional[int] = 0.0_5 , _UpperCamelCase :List[str] = 50 , )-> Tuple:
        # NOTE(review): every parameter shares the scrambled name `_UpperCamelCase`
        # (duplicate argument names are a SyntaxError), and the body reads
        # `sigma_max`, which is not in scope — verify against the upstream
        # KarrasVeScheduler signature before fixing.
        __A = sigma_max
        # setable values
        __A = None
        __A = None
        __A = None  # sigma(t_i)

    def _lowerCAmelCase (self :Dict , _UpperCamelCase :Dict , _UpperCamelCase :Any = None )-> int:
        # Identity: this scheduler needs no input scaling.
        # NOTE(review): returns `sample`, which is not a visible parameter name.
        return sample

    def _lowerCAmelCase (self :List[str] , _UpperCamelCase :List[str] , _UpperCamelCase :int = None )-> int:
        # Precompute the discrete timesteps and the geometric sigma schedule
        # interpolating sigma_max -> sigma_min over num_inference_steps.
        __A = num_inference_steps
        __A = np.arange(0 , self.num_inference_steps )[::-1].copy()
        __A = torch.from_numpy(__a ).to(__a )
        __A = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        __A = torch.tensor(__a , dtype=torch.floataa , device=__a )

    def _lowerCAmelCase (self :List[Any] , _UpperCamelCase :str , _UpperCamelCase :Any , _UpperCamelCase :Optional[int] = None )-> Any:
        # Stochastically increase the noise level: gamma > 0 only inside the
        # [s_min, s_max] band, then add eps ~ N(0, s_noise^2 I) scaled so the
        # sample's total variance matches sigma_hat.
        if self.config.s_min <= sigma <= self.config.s_max:
            __A = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
        else:
            __A = 0
        # sample eps ~ N(0, S_noise^2 * I)
        __A = self.config.s_noise * randn_tensor(sample.shape , generator=__a ).to(sample.device )
        __A = sigma + gamma * sigma
        __A = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def _lowerCAmelCase (self :Any , _UpperCamelCase :Any , _UpperCamelCase :int , _UpperCamelCase :Any , _UpperCamelCase :List[str] , _UpperCamelCase :Dict = True , )-> str:
        # First-order (Euler) step from sigma_hat to sigma_prev.
        __A = sample_hat + sigma_hat * model_output
        __A = (sample_hat - pred_original_sample) / sigma_hat
        __A = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=__a , derivative=__a , pred_original_sample=__a )

    def _lowerCAmelCase (self :List[str] , _UpperCamelCase :int , _UpperCamelCase :Optional[int] , _UpperCamelCase :int , _UpperCamelCase :str , _UpperCamelCase :List[Any] , _UpperCamelCase :Tuple , _UpperCamelCase :List[Any] = True , )-> str:
        # Second-order correction: average the two derivative estimates (Heun).
        __A = sample_prev + sigma_prev * model_output
        __A = (sample_prev - pred_original_sample) / sigma_prev
        __A = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=__a , derivative=__a , pred_original_sample=__a )

    def _lowerCAmelCase (self :List[Any] , _UpperCamelCase :Optional[int] , _UpperCamelCase :int , _UpperCamelCase :Dict )-> Union[str, Any]:
        # Training-time noise addition is not supported by this scheduler.
        raise NotImplementedError()
| 117
|
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
_snake_case = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if got_ver is None or want_ver is None:
raise ValueError(
F"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
F" reinstalling {pkg}." )
if not ops[op](version.parse(_lowerCamelCase ) , version.parse(_lowerCamelCase ) ):
raise ImportError(
F"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}" )
def require_version(requirement, hint=None):
    """
    Check that an installed package satisfies a pip-style requirement string
    (e.g. "tokenizers==0.9.4", "numpy>=1.17,<2.0", or bare "numpy").

    Raises ValueError for malformed requirements, PackageNotFoundError when the
    distribution is absent, ImportError when the version check fails. Fixes:
    the original repeated a scrambled parameter name (SyntaxError) and
    referenced undefined names throughout; `require_version_core` below calls
    this name.
    """
    hint = f"\n{hint}" if hint is not None else ""
    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case: compare against the running interpreter version
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def require_version_core(requirement):
    """
    `require_version` with a hint pointing at the transformers dev install.

    Fixes: the original bound the hint to a scrambled local and returned a call
    with undefined argument names.
    """
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
| 36
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase : Optional[Any] =logging.get_logger(__name__)
UpperCAmelCase : List[Any] ={
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class _lowercase(BackboneConfigMixin, PretrainedConfig):
    """
    Configuration for a Swin Transformer model (defaults match
    `microsoft/swin-tiny-patch4-window7-224`).

    Fixes: the original listed the same base twice (TypeError at class
    creation), bound `model_type` and `attribute_map` to one duplicated class
    attribute, repeated a scrambled parameter name for every argument (a
    SyntaxError), and assigned to locals instead of `self`.
    """

    model_type = "swin"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],  # list defaults kept for API compatibility; never mutated
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [F"""stage{idx}""" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class _lowercase(a_):
    """ONNX export settings for Swin: a single 4-D `pixel_values` input and a
    1e-4 validation tolerance. (Both properties keep the original's shadowed
    name.)"""

    lowercase__ = version.parse("1.11")

    @property
    def _lowerCamelCase(self):
        """Dynamic-axis specification for the model inputs."""
        dynamic_axes = {0: "batch", 1: "num_channels", 2: "height", 3: "width"}
        return OrderedDict([("pixel_values", dynamic_axes)])

    @property
    def _lowerCamelCase(self):
        """Absolute tolerance used when validating the exported model."""
        return 1e-4
| 128
|
import argparse
from collections import defaultdict
import yaml
_snake_case = "docs/source/en/_toctree.yml"
def clean_doc_toc(doc_list):
    """
    Clean one level of a documentation table of contents: deduplicate entries
    that share the same `local` key, sort entries alphabetically by title, and
    float any "Overview" entry to the front.

    Fixes: the original passed the doc list as the `defaultdict` factory
    (TypeError on first increment), referenced undefined scrambled names, and
    was named `A` while its call sites below use `clean_doc_toc`.
    """
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)
    doc_list = new_doc_list

    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError("{doc_list} has two 'overview' docs which is not allowed.")
    overview_doc.extend(new_doc)
    return overview_doc
def check_scheduler_doc(overwrite=False):
    """
    Check (and with `overwrite=True`, fix) the "Schedulers" section of the doc
    table of contents at the module-level TOC path (`_snake_case`).

    Fixes: the original read/wrote files and passed arguments via undefined
    scrambled names; the `__main__` block below calls `check_scheduler_doc`.
    """
    with open(_snake_case, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(_snake_case, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
def check_pipeline_doc(overwrite=False):
    """
    Check (and with `overwrite=True`, fix) the "Pipelines" section of the doc
    table of contents at the module-level TOC path (`_snake_case`), including
    each pipeline's nested "section" list.

    Fixes: the original read/wrote files and passed arguments via undefined
    scrambled names; the `__main__` block below calls `check_pipeline_doc`.
    """
    with open(_snake_case, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(_snake_case, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    # CLI entry point for the TOC checks. Fix: the parser/args were bound to
    # `_snake_case` while the code read `parser`/`args` (NameError at runtime).
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
| 36
| 0
|
from __future__ import annotations
import bisect
def bisect_left(sorted_collection, item, lo=0, hi=-1):
    """
    Return the leftmost index at which `item` can be inserted into
    `sorted_collection[lo:hi]` while keeping it sorted. `hi=-1` means
    "to the end". Fix: renamed from the scrambled `_snake_case` — the insort
    helpers below call `bisect_left`.
    """
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def bisect_right(sorted_collection, item, lo=0, hi=-1):
    """
    Return the rightmost index at which `item` can be inserted into
    `sorted_collection[lo:hi]` while keeping it sorted. `hi=-1` means
    "to the end". Fix: renamed from the scrambled `_snake_case` — the insort
    helpers below call `bisect_right`.
    """
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def insort_left(sorted_collection, item, lo=0, hi=-1):
    """Insert `item` into `sorted_collection` in place at its leftmost sorted
    position (delegates to `bisect_left`)."""
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)
def insort_right(sorted_collection, item, lo=0, hi=-1):
    """Insert `item` into `sorted_collection` in place at its rightmost sorted
    position (delegates to `bisect_right`)."""
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)
def binary_search(sorted_collection, item):
    """
    Iterative binary search; return the index of `item` or None when absent.
    Fix: renamed from the scrambled `_snake_case` — the `__main__` block below
    calls `binary_search`.
    """
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def binary_search_std_lib(sorted_collection, item):
    """Binary search via the standard-library `bisect` module; return the index
    of `item` or None when absent."""
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion(sorted_collection, item, left, right):
    """
    Recursive binary search over `sorted_collection[left:right + 1]`; return
    the index of `item` or None. Fix: the def was named `_snake_case` while the
    recursive calls target `binary_search_by_recursion`, so recursion was
    broken — renamed to match.
    """
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
if __name__ == "__main__":
    # Interactive demo. Fix: values were bound to the scrambled name `lowercase`
    # while later lines read `user_input`/`collection`/`target`/`result`.
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
| 20
|
def A(density, bulk_modulus):
    """
    Return the speed of sound in a fluid: sqrt(bulk_modulus / density).

    Raises ValueError for non-positive density or bulk modulus. Fixes: the
    original repeated one scrambled parameter name for both arguments (a
    SyntaxError); the real names are grounded by the error messages.
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
    # Run any doctests defined in this module when executed as a script.
    import doctest

    doctest.testmod()
| 36
| 0
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a__ : Optional[Any] = logging.get_logger(__name__)
a__ : Union[str, Any] = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
a__ : List[str] = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
a__ : List[Any] = {'''facebook/blenderbot-3B''': 1_2_8}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Return a dict mapping each of the 256 byte values to a printable unicode
    character (printable bytes map to themselves; the rest map to 256+n).

    Fixes: the original collapsed `bs`/`cs`/`n` into one repeatedly rebound
    scrambled name and incremented `n` unconditionally; the tokenizer below
    calls `bytes_to_unicode()`.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in `word` (a tuple of symbols /
    variable-length strings).

    Fixes: the original's body read `word` while its parameter had a scrambled
    name (NameError), and it shadowed `bytes_to_unicode` by reusing its
    scrambled function name; the tokenizer below calls `get_pairs`.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class UpperCamelCase_ ( UpperCamelCase):
    """Byte-level BPE tokenizer in the Blenderbot style: GPT-2 byte encoding,
    BPE merges, and conversation building. NOTE(review): the base name
    `UpperCamelCase` and most local names are scrambled — presumably the base
    is PreTrainedTokenizer; several signatures repeat one parameter name,
    which is a SyntaxError, and bodies reference names (bos_token, vocab_file,
    index, kv, ...) that the visible signatures do not bind."""
    snake_case__ : str = VOCAB_FILES_NAMES
    snake_case__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
    snake_case__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    snake_case__ : int = ["input_ids", "attention_mask"]

    # NOTE(review): all parameters share the scrambled name `UpperCAmelCase__`;
    # the defaults correspond to vocab_file, merges_file, errors, bos/eos/sep/
    # cls/unk/pad/mask tokens and add_prefix_space — verify before fixing.
    def __init__( self : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple="replace" , UpperCAmelCase__ : Union[str, Any]="<s>" , UpperCAmelCase__ : Any="</s>" , UpperCAmelCase__ : List[str]="</s>" , UpperCAmelCase__ : Any="<s>" , UpperCAmelCase__ : Union[str, Any]="<unk>" , UpperCAmelCase__ : Optional[int]="<pad>" , UpperCAmelCase__ : Tuple="<mask>" , UpperCAmelCase__ : str=False , **UpperCAmelCase__ : Any , ) -> str:
        # Wrap plain-string special tokens as AddedToken instances.
        __SCREAMING_SNAKE_CASE = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else bos_token
        __SCREAMING_SNAKE_CASE = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else eos_token
        __SCREAMING_SNAKE_CASE = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else sep_token
        __SCREAMING_SNAKE_CASE = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else cls_token
        __SCREAMING_SNAKE_CASE = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else unk_token
        __SCREAMING_SNAKE_CASE = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        __SCREAMING_SNAKE_CASE = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
        super().__init__(
            errors=__a , bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , cls_token=__a , pad_token=__a , mask_token=__a , add_prefix_space=__a , **__a , )
        # Load the token->id vocabulary and build the reverse map.
        with open(__a , encoding="utf-8" ) as vocab_handle:
            __SCREAMING_SNAKE_CASE = json.load(__a )
        __SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()}
        __SCREAMING_SNAKE_CASE = errors  # how to handle errors in decoding
        __SCREAMING_SNAKE_CASE = bytes_to_unicode()
        __SCREAMING_SNAKE_CASE = {v: k for k, v in self.byte_encoder.items()}
        # Load the ranked BPE merge table (first line is a version header).
        with open(__a , encoding="utf-8" ) as merges_handle:
            __SCREAMING_SNAKE_CASE = merges_handle.read().split("\n" )[1:-1]
        __SCREAMING_SNAKE_CASE = [tuple(merge.split() ) for merge in bpe_merges]
        __SCREAMING_SNAKE_CASE = dict(zip(__a , range(len(__a ) ) ) )
        __SCREAMING_SNAKE_CASE = {}
        __SCREAMING_SNAKE_CASE = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        __SCREAMING_SNAKE_CASE = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )

    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
        # Vocabulary size (base vocab only).
        return len(self.encoder )

    def UpperCAmelCase_ ( self : Any ) -> List[Any]:
        # Full vocabulary including added tokens.
        return dict(self.encoder , **self.added_tokens_encoder )

    def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : Union[str, Any] ) -> Optional[int]:
        # Apply BPE to a single pre-token, with per-token memoization in self.cache.
        if token in self.cache:
            return self.cache[token]
        __SCREAMING_SNAKE_CASE = tuple(__a )
        __SCREAMING_SNAKE_CASE = get_pairs(__a )
        if not pairs:
            return token
        while True:
            # Pick the lowest-ranked (most frequent) merge still applicable.
            # NOTE(review): the lambda's parameter is unused and `__a` is closed
            # over instead — scrambling artifact, verify against upstream.
            __SCREAMING_SNAKE_CASE = min(__a , key=lambda UpperCAmelCase__ : self.bpe_ranks.get(__a , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            __SCREAMING_SNAKE_CASE = bigram
            __SCREAMING_SNAKE_CASE = []
            __SCREAMING_SNAKE_CASE = 0
            while i < len(__a ):
                try:
                    __SCREAMING_SNAKE_CASE = word.index(__a , __a )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    __SCREAMING_SNAKE_CASE = j
                if word[i] == first and i < len(__a ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            __SCREAMING_SNAKE_CASE = tuple(__a )
            __SCREAMING_SNAKE_CASE = new_word
            if len(__a ) == 1:
                break
            else:
                __SCREAMING_SNAKE_CASE = get_pairs(__a )
        __SCREAMING_SNAKE_CASE = " ".join(__a )
        __SCREAMING_SNAKE_CASE = word
        return word

    def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : List[Any] ) -> int:
        # Tokenize text: regex pre-tokenization, byte->unicode mapping, then BPE.
        __SCREAMING_SNAKE_CASE = []
        for token in re.findall(self.pat , __a ):
            __SCREAMING_SNAKE_CASE = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__a ).split(" " ) )
        return bpe_tokens

    def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : Any ) -> List[Any]:
        # Token string -> id, falling back to the unk token's id.
        return self.encoder.get(__a , self.encoder.get(self.unk_token ) )

    def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Any ) -> Tuple:
        # Id -> token string.
        return self.decoder.get(__a )

    def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : List[str] ) -> Optional[Any]:
        # Tokens -> original string via the inverse byte map.
        __SCREAMING_SNAKE_CASE = "".join(__a )
        __SCREAMING_SNAKE_CASE = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
        return text

    def UpperCAmelCase_ ( self : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict = None ) -> int:
        # Save vocab.json and merges.txt into `save_directory`.
        if not os.path.isdir(__a ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        __SCREAMING_SNAKE_CASE = os.path.join(
            __a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        __SCREAMING_SNAKE_CASE = os.path.join(
            __a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(__a , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=__a , ensure_ascii=__a ) + "\n" )
        __SCREAMING_SNAKE_CASE = 0
        with open(__a , "w" , encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            # NOTE(review): `kv` and `index` are read but never bound under the
            # scrambled names here — scrambling artifact, verify upstream.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCAmelCase__ : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        " Please check that the tokenizer is not corrupted!" )
                    __SCREAMING_SNAKE_CASE = token_index
                writer.write(" ".join(__a ) + "\n" )
                index += 1
        return vocab_file, merge_file

    def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[int] = False ) -> Dict:
        # 0/1 mask marking special-token positions for one or two sequences.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
        if token_ids_a is None:
            return [1] + ([0] * len(__a )) + [1]
        return [1] + ([0] * len(__a )) + [1, 1] + ([0] * len(__a )) + [1]

    def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] = None ) -> str:
        # Token-type ids: all zeros (this model does not use token types).
        __SCREAMING_SNAKE_CASE = [self.sep_token_id]
        __SCREAMING_SNAKE_CASE = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any]=False , **UpperCAmelCase__ : Optional[int] ) -> int:
        # Optionally prepend a space so the first word BPE-merges like mid-text words.
        __SCREAMING_SNAKE_CASE = kwargs.pop("add_prefix_space" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(__a ) > 0 and not text[0].isspace()):
            __SCREAMING_SNAKE_CASE = " " + text
        return (text, kwargs)

    def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Any = None ) -> int:
        # Blenderbot appends only EOS; no BOS/second-sequence handling.
        return token_ids_a + [self.eos_token_id]

    def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : List[Any] ) -> Tuple:
        # Flatten a Conversation into ids, truncating from the left if too long.
        __SCREAMING_SNAKE_CASE = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text )
            else:
                # Generated responses should contain them already.
                inputs.append(__a )
        __SCREAMING_SNAKE_CASE = "  ".join(__a )
        __SCREAMING_SNAKE_CASE = self.encode(__a )
        if len(__a ) > self.model_max_length:
            __SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :]
            logger.warning(F"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
        return input_ids
| 54
|
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
_snake_case = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class UpperCAmelCase_ ( a):
    """Dataset stub whose item *i* is simply the integer *i*; length is configurable."""

    def __init__(self, __a=101):
        '''Store the configured length (default 101 exercises uneven shards).'''
        # Bug fix: the original bound the length to a throwaway local, so
        # __len__ raised AttributeError.
        self.length = __a

    def __len__(self):
        '''Number of items in the dataset.'''
        return self.length

    def __getitem__(self, __a):
        '''Return the index itself as the sample (original returned undefined `i`).'''
        return __a
class UpperCAmelCase_ :
    """Collator stub: exposes the raw batch as identical input/label tensors."""

    def __call__(self, __a):
        '''Convert the list of dataset items into the model's input dict.'''
        batch = torch.tensor(__a)
        labels = torch.tensor(__a)
        return {"input_ids": batch, "labels": labels}
class UpperCAmelCase_ ( nn.Module):
    """Model stub: returns a zero loss (plus the inputs) so Trainer can run end-to-end."""

    def __init__(self):
        '''Create the module and register a dummy parameter.'''
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        # Bug fix: the original never attached the Linear to self, so the
        # module had no parameters at all.
        self.fc = nn.Linear(120, 80)

    def snake_case__(self, input_ids, labels=None):
        '''Forward pass: zero loss + echoed inputs with labels, else just the inputs.

        Bug fix: the original declared the same parameter name twice (a
        SyntaxError) and referenced undefined `labels`/`input_ids`.
        '''
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class UpperCAmelCase_ ( a):
    """TestCase: run this file under torchrun with 2 Neuron cores."""

    @require_torch_neuroncore
    def snake_case__(self):
        '''Launch a 2-process distributed run of this file; any subprocess error fails the test.

        Bug fix: the original assigned the command pieces to throwaway locals
        and then referenced the undefined names `distributed_args`/`args`.
        '''
        distributed_args = f"--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
class UpperCAmelCase_ ( a):
    """TestCase: run this file under torchrun across all visible GPUs."""

    @require_torch_multi_gpu
    def snake_case__(self):
        '''Launch one distributed process per GPU; any subprocess error fails the test.

        Bug fix: the original assigned the command pieces to throwaway locals
        and then referenced the undefined names `distributed_args`/`args`.
        '''
        distributed_args = f"--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
    # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
    #
    # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    #
    # Bug fix: the original assigned every value to the throwaway name
    # `_snake_case` and then referenced the real names (`parser`,
    # `training_args`, `dataset`, `compute_metrics`, `trainer`, `metrics`, `p`),
    # all undefined.
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
    logger.warning(
        f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
        f"""distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"""
    )
    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p):
            """Verify predictions/labels come back sequential and complete."""
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)
        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)
        # Re-run evaluation/prediction with gradient-accumulated eval to cover
        # the chunked gather path, then restore the default.
        trainer.args.eval_accumulation_steps = 2
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)
        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)
        trainer.args.eval_accumulation_steps = None
| 36
| 0
|
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    # Bug fix: the original assigned every value to the throwaway name
    # `lowercase__` while the rest of the script read real names (`df`,
    # `len_data`, `actual_data`, ...), all undefined.
    df = pd.read_csv("""sample_data.csv""", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10   # window of past samples fed to the LSTM
    forward_days = 5  # horizon predicted per window
    periods = 20     # number of look_back-sized periods reserved for testing
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    # Slice the series into (look_back -> forward_days) supervised pairs.
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="""mean_squared_error""", optimizer="""adam""")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    predictions = model.predict(x_test)
| 241
|
from __future__ import annotations
import bisect
def A(sorted_collection, item, lo=0, hi=-1):
    '''Leftmost insertion index for `item` in `sorted_collection[lo:hi]` (hi < 0 means end).

    Bug fix: the original declared four identically-named parameters (a
    SyntaxError) and referenced undefined locals.
    '''
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2  # midpoint without overflow-style bias
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def A(sorted_collection, item, lo=0, hi=-1):
    '''Rightmost insertion index for `item` in `sorted_collection[lo:hi]` (hi < 0 means end).

    Bug fix: the original declared four identically-named parameters (a
    SyntaxError) and referenced undefined locals.
    '''
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        # `<=` (vs `<` in bisect_left) pushes past equal elements.
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def A(sorted_collection, item, lo=0, hi=-1):
    '''Insert `item` into `sorted_collection` at the leftmost position that keeps order.

    Bug fix: the original had duplicated parameter names and called an
    undefined `bisect_left`; the stdlib `bisect` module (already imported by
    this file) is used instead.
    '''
    if hi < 0:
        hi = len(sorted_collection)
    sorted_collection.insert(bisect.bisect_left(sorted_collection, item, lo, hi), item)
def A(sorted_collection, item, lo=0, hi=-1):
    '''Insert `item` into `sorted_collection` at the rightmost position that keeps order.

    Bug fix: the original had duplicated parameter names and called an
    undefined `bisect_right`; the stdlib `bisect` module (already imported by
    this file) is used instead.
    '''
    if hi < 0:
        hi = len(sorted_collection)
    sorted_collection.insert(bisect.bisect_right(sorted_collection, item, lo, hi), item)
def A(sorted_collection, item):
    '''Classic iterative binary search; returns the index of `item` or None.

    Bug fix: the original declared two identically-named parameters (a
    SyntaxError) and referenced undefined locals.
    '''
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        if item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def A(sorted_collection, item):
    '''Binary search via stdlib bisect; returns the index of `item` or None.

    Bug fix: the original had duplicated parameter names and referenced
    undefined locals (`index`, `sorted_collection`, `item`).
    '''
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None
def A(sorted_collection, item, left, right):
    '''Recursive binary search on sorted_collection[left:right+1]; index or None.

    Bug fix: the original declared four identically-named parameters (a
    SyntaxError) and recursed into an undefined `binary_search_by_recursion`;
    the recursion now targets this function itself.
    '''
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    if sorted_collection[midpoint] > item:
        return A(sorted_collection, item, left, midpoint - 1)
    return A(sorted_collection, item, midpoint + 1, right)
if __name__ == "__main__":
    # Bug fix: the original assigned everything to the throwaway `_snake_case`
    # and then referenced undefined names; it also called `binary_search`,
    # which does not exist in this file (every helper is named `A`), so the
    # stdlib bisect module (imported at the top) is used directly.
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    index = bisect.bisect_left(collection, target)
    result = index if index != len(collection) and collection[index] == target else None
    if result is None:
        print(f'''{target} was not found in {collection}.''')
    else:
        print(f'''{target} was found at position {result} in {collection}.''')
| 36
| 0
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_snake_case = logging.get_logger(__name__)
# NOTE(review): every constant below is assigned to the same name
# `_snake_case`, so each assignment overwrites the previous one — presumably
# these were VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES before obfuscation; confirm against
# the class attributes that consume them.
# File names of the serialized tokenizer artifacts.
_snake_case = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# Hub download URLs for each pretrained GPT-2 checkpoint.
_snake_case = {
    'vocab_file': {
        'gpt2': 'https://huggingface.co/gpt2/resolve/main/vocab.json',
        'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/vocab.json',
        'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/vocab.json',
        'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/vocab.json',
        'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/vocab.json',
    },
    'merges_file': {
        'gpt2': 'https://huggingface.co/gpt2/resolve/main/merges.txt',
        'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/merges.txt',
        'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/merges.txt',
        'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/merges.txt',
        'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/merges.txt',
    },
    'tokenizer_file': {
        'gpt2': 'https://huggingface.co/gpt2/resolve/main/tokenizer.json',
        'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json',
        'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/tokenizer.json',
        'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json',
        'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/tokenizer.json',
    },
}
# Maximum sequence length per checkpoint (positional-embedding size).
_snake_case = {
    'gpt2': 1_024,
    'gpt2-medium': 1_024,
    'gpt2-large': 1_024,
    'gpt2-xl': 1_024,
    'distilgpt2': 1_024,
}
class a__ ( lowerCamelCase_ ):
    """
    Fast GPT-2 tokenizer (byte-level BPE) backed by HuggingFace `tokenizers`.

    Bug fixes vs the obfuscated original: the five class attributes were all
    named `_SCREAMING_SNAKE_CASE` (each overwrote the last), every parameter
    name was duplicated (a SyntaxError), and all four methods shared one name.
    NOTE(review): the base is presumably PreTrainedTokenizerFast — confirm.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPTaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        """Build the fast tokenizer, syncing `add_prefix_space` into the backend pre-tokenizer."""
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        self.add_bos_token = kwargs.pop("add_bos_token", False)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            # Rebuild the backend pre-tokenizer with the requested setting.
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs):
        """Refuse pretokenized input unless add_prefix_space=True."""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs):
        """Refuse pretokenized input unless add_prefix_space=True."""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save the backend tokenizer model files; returns the written paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation):
        """Flatten a Conversation into ids, keeping at most model_max_length tokens."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 250
|
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class UpperCAmelCase_ ( a):
    """Per-sample audio-filter interface used by the response-plot helpers below."""
    def snake_case__ ( self, __a):
        '''Process one audio sample `__a`; this stub always returns silence (0.0).'''
        return 0.0
def A(fft_results, samplerate):
    '''Return (lowest, highest) plotting bounds over fft_results below Nyquist.

    The bounds are clamped so the plot always spans at least [-20, 20] dB.
    Bug fix: the original declared two identically-named parameters (a
    SyntaxError) and referenced undefined locals.
    '''
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest
def A(filter_type, samplerate):
    '''Plot the filter's frequency response (gain in dB) on a log-frequency axis.

    Bug fixes: duplicated parameter names, undefined locals, impulse fed as
    `process(_lowerCamelCase)` instead of `process(item)`, and `np.logaa`
    (which does not exist) instead of `np.log10`.
    '''
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    # Display within reasonable bounds (at least [-20, 20] dB, capped at +/-80).
    lowest = min([-20, np.min(fft_db[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_db[1 : samplerate // 2 - 1])])
    plt.ylim(max([-80, lowest]), min([80, highest]))
    plt.ylabel("Gain (dB)")
    plt.plot(fft_db)
    plt.show()
def A(filter_type, samplerate):
    '''Plot the filter's phase response (radians) on a log-frequency axis.

    Bug fixes: duplicated parameter names, undefined locals, and the impulse
    fed as `process(_lowerCamelCase)` instead of `process(item)`.
    '''
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phase = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phase, -2 * pi))
    plt.show()
| 36
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
# Lazy-import scaffolding for the speech-encoder-decoder module.
# Bug fix: the original assigned every value to the throwaway name
# `__lowerCamelCase`, leaving `_import_structure` undefined where it is used
# below and never installing the lazy module proxy.
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 221
|
def A(bit_count):
    '''Return the `bit_count`-bit Gray code sequence as integers.

    Bug fix: the original body referenced undefined names (`bit_count`,
    `sequence`) and called an undefined `gray_code_sequence_string`; the
    closed form i ^ (i >> 1) produces the identical sequence directly.
    '''
    if bit_count < 0:
        raise ValueError("The given input must be positive")
    # i ^ (i >> 1) is the i-th Gray code; it matches the recursive
    # string construction used by the sibling helper in this file.
    return [i ^ (i >> 1) for i in range(1 << bit_count)]
def A(bit_count):
    '''Return the `bit_count`-bit Gray code sequence as binary strings.

    Bug fix: the original body referenced undefined names and recursed into an
    undefined `gray_code_sequence_string`; the recursion now targets this
    function itself.
    '''
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence; 1 << n == 2**n
    # Recursive answer generates the sequence for n - 1 bits.
    smaller_sequence = A(bit_count - 1)
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 36
| 0
|
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ :
    """Builds Flaubert configs/random inputs and runs per-head shape checks.

    Bug fixes vs the obfuscated original: duplicated parameter names (a
    SyntaxError), values bound to throwaway locals, and every method named
    `snake_case_` (each overwrote the last) while the sibling test class calls
    the real names restored here. NOTE(review): the original inherited from an
    undefined `a__`; upstream model testers are plain classes, so it is dropped.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        # Store every knob on the instance (the original lost them in locals).
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random ids/masks/labels sized from the stored hyper-parameters."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None  # robustness: defined even when use_labels=False
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        """Build a FlaubertConfig from the stored hyper-parameters."""
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )

    def create_and_check_flaubert_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """Base model: last_hidden_state must be (batch, seq, hidden)."""
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """LM head: scalar loss and (batch, seq, vocab) logits."""
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """Simple QA head: per-token start/end logits of shape (batch, seq)."""
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """Beam-search QA head: check loss and top-k start/end outputs."""
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """Sequence classification head: scalar loss and (batch, labels) logits."""
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """Token classification head: logits of shape (batch, seq, num_labels)."""
        num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """Multiple-choice head: logits of shape (batch, num_choices)."""
        num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Expand each tensor to (batch, num_choices, seq) for the MC head.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) for the shared model-tester machinery."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common + pipeline test suite for Flaubert models.

    Bug fixes vs the obfuscated original: the class listed the same base
    (`a__`) twice (a TypeError at class creation) instead of the imported
    mixins; both class attributes shared one name; method/parameter names were
    duplicated or referenced undefined `__a` locals; and no method carried the
    `test_` prefix required for unittest discovery.
    """

    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        """Skip QA pipeline tests when a slow tokenizer would be used."""
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith('Fast')
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Add zeroed QA label tensors for FlaubertForQuestionAnswering."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        """Trace a model on CPU, reload it on GPU, and run it there."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict['input_ids'].to('cpu'), inputs_dict['attention_mask'].to('cpu'))
            )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, 'traced_model.pt'))
                loaded = torch.jit.load(os.path.join(tmp, 'traced_model.pt'), map_location=torch_device)
                loaded(inputs_dict['input_ids'].to(torch_device), inputs_dict['attention_mask'].to(torch_device))
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
    """Slow integration test: check base-model hidden states against reference values."""

    @slow
    def snake_case_(self):
        '''Run flaubert_base_cased on a fixed input and compare a 3x3 output slice.

        Bug fix: the original bound the model/ids/output to throwaway locals
        and then referenced the undefined names `model`, `__a`, `output`.
        '''
        model = FlaubertModel.from_pretrained('flaubert/flaubert_base_cased')
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 119
|
from PIL import Image
def A ( _lowerCamelCase ):
    """Binarize a grayscale PIL image in place using its mean pixel value as threshold.

    Pixels strictly brighter than the mean become 255, all others become 0.
    The image is modified in place and also returned.
    """
    # Bug fix: the original unpacked `image.size` into the same name twice and
    # then referenced undefined `width`/`height` and iterated `range(image)`.
    width, height = _lowerCamelCase.size
    pixels = _lowerCamelCase.load()
    # Accumulate the integer mean over every pixel.
    mean = 0
    for x in range(width):
        for y in range(height):
            mean += pixels[x, y]
    mean //= width * height
    # Threshold every pixel against the mean.
    for x in range(width):
        for y in range(height):
            pixels[x, y] = 255 if pixels[x, y] > mean else 0
    return _lowerCamelCase
if __name__ == "__main__":
    # Bug fix: the original called an undefined `mean_threshold` and saved an
    # undefined `image`.  `A` (defined above) is the mean-threshold routine;
    # "L" converts the input to grayscale before thresholding.
    _snake_case = A(Image.open("path_to_image").convert("L"))
    _snake_case.save("output_image_path")
| 36
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class UpperCamelCase_ :
    """Builds XGLM configs and dummy TF inputs for the model tests below.

    NOTE(review): this class is badly damaged by an automated rename — several
    `__a` references are name-mangled and unbound, `__init__`'s parameters all
    share one name (a SyntaxError), and some `self.*` attributes are never set.
    Restore names from the upstream `TFXGLMModelTester` before use.
    """

    # NOTE(review): all three class attributes share the obfuscated name
    # `__magic_name__`, so only the last assignment survives; upstream these
    # are `config_cls`, `config_updates` and `hidden_act`.
    __magic_name__ = XGLMConfig
    __magic_name__ = {}
    __magic_name__ = '''gelu'''

    def __init__( self : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple=14 , lowerCAmelCase_ : Tuple=7 , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Dict=99 , lowerCAmelCase_ : str=32 , lowerCAmelCase_ : int=2 , lowerCAmelCase_ : List[Any]=4 , lowerCAmelCase_ : Tuple=37 , lowerCAmelCase_ : List[Any]="gelu" , lowerCAmelCase_ : int=0.1 , lowerCAmelCase_ : Union[str, Any]=0.1 , lowerCAmelCase_ : List[str]=512 , lowerCAmelCase_ : Dict=0.0_2 , ) -> Optional[Any]:
        # Store the test hyper-parameters on the instance.
        # NOTE(review): every parameter above is named `lowerCAmelCase_`
        # (duplicate argument names are a SyntaxError) and the right-hand-side
        # names (`parent`, `batch_size`, ...) are unbound.
        UpperCAmelCase_ : Dict = parent
        UpperCAmelCase_ : Tuple = batch_size
        UpperCAmelCase_ : Dict = seq_length
        UpperCAmelCase_ : Optional[int] = is_training
        UpperCAmelCase_ : Optional[Any] = use_input_mask
        UpperCAmelCase_ : Union[str, Any] = use_labels
        UpperCAmelCase_ : Dict = vocab_size
        UpperCAmelCase_ : List[str] = d_model
        UpperCAmelCase_ : int = num_hidden_layers
        UpperCAmelCase_ : List[Any] = num_attention_heads
        UpperCAmelCase_ : int = ffn_dim
        UpperCAmelCase_ : Any = activation_function
        UpperCAmelCase_ : str = activation_dropout
        UpperCAmelCase_ : List[Any] = attention_dropout
        UpperCAmelCase_ : Dict = max_position_embeddings
        UpperCAmelCase_ : Any = initializer_range
        UpperCAmelCase_ : Optional[Any] = None
        UpperCAmelCase_ : Any = 0
        UpperCAmelCase_ : Optional[Any] = 2
        UpperCAmelCase_ : Dict = 1

    def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
        """Return a reference config loaded from the 564M checkpoint."""
        return XGLMConfig.from_pretrained("facebook/xglm-564M" )

    def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
        # Build (config, input_ids, input_mask, head_mask) for a forward pass.
        UpperCAmelCase_ : Dict = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
        UpperCAmelCase_ : Optional[Any] = None
        if self.use_input_mask:
            UpperCAmelCase_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
        UpperCAmelCase_ : int = self.get_config()
        # NOTE(review): `self.get_config` does not exist under the obfuscated
        # method names, and the names in the return tuple are unbound.
        UpperCAmelCase_ : Dict = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
        # Assemble an XGLMConfig from the stored hyper-parameters.
        # NOTE(review): `self.hidden_size` is never set (d_model is stored
        # instead) and `__a` is unbound here (presumably True) — confirm
        # against the upstream tester.
        return XGLMConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__a , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__a , )

    def _SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
        # NOTE(review): the parenthesized annotated target below is invalid
        # syntax (it was a multi-name tuple unpack upstream), and
        # `config_and_inputs`, `input_ids`, `head_mask` are unbound.
        UpperCAmelCase_ : Optional[int] = self.prepare_config_and_inputs()
        (
            UpperCAmelCase_
        ) : List[Any] = config_and_inputs
        UpperCAmelCase_ : List[Any] = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class UpperCamelCase_ (__A , __A , unittest.TestCase ):
    """Shared model tests for TFXGLMModel / TFXGLMForCausalLM.

    NOTE(review): the base names `__A` and the tester class `TFXGLMModelTester`
    are undefined in this file (obfuscated renames) — upstream the bases are
    `TFModelTesterMixin` and `PipelineTesterMixin`.
    """

    # NOTE(review): every attribute shares the obfuscated name `__magic_name__`,
    # so only the last assignment survives; upstream these are
    # `all_model_classes`, `all_generative_model_classes`, `pipeline_model_mapping`,
    # `test_onnx`, `test_missing_keys`, `test_pruning`.
    __magic_name__ = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    __magic_name__ = (TFXGLMForCausalLM,) if is_tf_available() else ()
    __magic_name__ = (
        {'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {}
    )
    __magic_name__ = False
    __magic_name__ = False
    __magic_name__ = False

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
        # Set up the model tester and config tester used by the shared tests.
        # NOTE(review): `__a` is name-mangled and unbound (presumably XGLMConfig).
        UpperCAmelCase_ : List[str] = TFXGLMModelTester(self )
        UpperCAmelCase_ : int = ConfigTester(self , config_class=__a , n_embd=37 )

    def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
        # Run the shared config sanity checks.
        self.config_tester.run_common_tests()

    @slow
    def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
        # Smoke-test loading the first released checkpoint.
        # NOTE(review): `__a` is unbound here; presumably `model_name` / `model`.
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase_ : Any = TFXGLMModel.from_pretrained(__a )
            self.assertIsNotNone(__a )

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor." )
    def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
        super().test_resize_token_embeddings()
@require_tf
class UpperCamelCase_ (unittest.TestCase ):
    """Slow integration tests that run the pretrained facebook/xglm-564M
    checkpoint end-to-end: greedy decoding, seeded sampling, and left-padded
    batched generation.

    NOTE(review): `__a` references below are name-mangled and unbound
    (artifacts of an automated rename); restore from the upstream test.
    """

    @slow
    def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Dict=True ) -> Optional[Any]:
        # Greedy decoding from "The dog" must reproduce the reference token ids.
        # NOTE(review): the parameter is presumably `verify_outputs`, and the
        # `__a` arguments presumably `input_ids` / `do_sample=False` /
        # `expected_output_ids`.  `tf.intaa` looks like a mangled `tf.int64`.
        UpperCAmelCase_ : Optional[int] = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
        UpperCAmelCase_ : Optional[Any] = tf.convert_to_tensor([[2, 268, 9_865]] , dtype=tf.intaa )  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        UpperCAmelCase_ : Tuple = [2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581]
        # fmt: on
        UpperCAmelCase_ : Tuple = model.generate(__a , do_sample=__a , num_beams=1 )
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist() , __a )

    @slow
    def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
        # Seeded sampling on CPU must reproduce the reference sentence.
        UpperCAmelCase_ : Union[str, Any] = XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
        UpperCAmelCase_ : List[str] = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
        tf.random.set_seed(0 )
        UpperCAmelCase_ : Union[str, Any] = tokenizer("Today is a nice day and" , return_tensors="tf" )
        UpperCAmelCase_ : Union[str, Any] = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0" ):
            UpperCAmelCase_ : Tuple = model.generate(__a , do_sample=__a , seed=[7, 0] )
        UpperCAmelCase_ : Optional[Any] = tokenizer.decode(output_ids[0] , skip_special_tokens=__a )
        UpperCAmelCase_ : Optional[Any] = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(__a , __a )

    @slow
    def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
        # Left-padded batched generation must match per-sentence generation.
        UpperCAmelCase_ : Optional[Any] = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
        UpperCAmelCase_ : Any = XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
        UpperCAmelCase_ : Union[str, Any] = "left"
        # use different length sentences to test batching
        UpperCAmelCase_ : Tuple = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]
        UpperCAmelCase_ : Tuple = tokenizer(__a , return_tensors="tf" , padding=__a )
        UpperCAmelCase_ : List[str] = inputs["input_ids"]
        UpperCAmelCase_ : Any = model.generate(input_ids=__a , attention_mask=inputs["attention_mask"] , max_new_tokens=12 )
        UpperCAmelCase_ : List[Any] = tokenizer(sentences[0] , return_tensors="tf" ).input_ids
        UpperCAmelCase_ : int = model.generate(input_ids=__a , max_new_tokens=12 )
        UpperCAmelCase_ : List[Any] = tokenizer(sentences[1] , return_tensors="tf" ).input_ids
        UpperCAmelCase_ : Optional[Any] = model.generate(input_ids=__a , max_new_tokens=12 )
        UpperCAmelCase_ : List[Any] = tokenizer.batch_decode(__a , skip_special_tokens=__a )
        UpperCAmelCase_ : Union[str, Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__a )
        UpperCAmelCase_ : Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=__a )
        UpperCAmelCase_ : Union[str, Any] = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(__a , __a )
        self.assertListEqual(__a , [non_padded_sentence, padded_sentence] )
| 268
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class UpperCAmelCase_ ( a):
    """Configuration for Wav2Vec2 models: feature-extractor, transformer encoder,
    SpecAugment, quantizer, CTC, adapter and classification-head settings.

    NOTE(review): every keyword parameter of `__init__` below is the
    name-mangled `__a` (duplicate argument names are a SyntaxError), so this
    definition cannot execute as written; the original parameter names must be
    restored from the upstream Wav2Vec2Config.
    """

    # NOTE(review): upstream this attribute is `model_type`.
    lowerCamelCase__ = 'wav2vec2'

    def __init__( self, __a=32, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.1, __a=0.1, __a=0.1, __a=0.0, __a=0.0, __a=0.1, __a=0.1, __a=0.02, __a=1E-5, __a="group", __a="gelu", __a=(512, 512, 512, 512, 512, 512, 512), __a=(5, 2, 2, 2, 2, 2, 2), __a=(10, 3, 3, 3, 3, 2, 2), __a=False, __a=128, __a=16, __a=False, __a=True, __a=0.05, __a=10, __a=2, __a=0.0, __a=10, __a=0, __a=320, __a=2, __a=0.1, __a=100, __a=256, __a=256, __a=0.1, __a="sum", __a=False, __a=False, __a=256, __a=(512, 512, 512, 512, 1500), __a=(5, 3, 3, 1, 1), __a=(1, 2, 3, 1, 1), __a=512, __a=0, __a=1, __a=2, __a=False, __a=3, __a=2, __a=3, __a=None, __a=None, **__a, ):
        """Store all config fields; special token ids are forwarded to the base class."""
        super().__init__(**__a, pad_token_id=__a, bos_token_id=__a, eos_token_id=__a)
        # Transformer encoder and feature-extractor settings.
        _lowerCAmelCase : str = hidden_size
        _lowerCAmelCase : Optional[int] = feat_extract_norm
        _lowerCAmelCase : Union[str, Any] = feat_extract_activation
        _lowerCAmelCase : Optional[Any] = list(__a)
        _lowerCAmelCase : List[str] = list(__a)
        _lowerCAmelCase : str = list(__a)
        _lowerCAmelCase : List[str] = conv_bias
        _lowerCAmelCase : str = num_conv_pos_embeddings
        _lowerCAmelCase : List[Any] = num_conv_pos_embedding_groups
        _lowerCAmelCase : str = len(self.conv_dim)
        _lowerCAmelCase : List[str] = num_hidden_layers
        _lowerCAmelCase : str = intermediate_size
        _lowerCAmelCase : Any = hidden_act
        _lowerCAmelCase : int = num_attention_heads
        _lowerCAmelCase : Optional[Any] = hidden_dropout
        _lowerCAmelCase : List[str] = attention_dropout
        _lowerCAmelCase : Tuple = activation_dropout
        _lowerCAmelCase : int = feat_proj_dropout
        _lowerCAmelCase : List[str] = final_dropout
        _lowerCAmelCase : int = layerdrop
        _lowerCAmelCase : int = layer_norm_eps
        _lowerCAmelCase : Union[str, Any] = initializer_range
        _lowerCAmelCase : str = vocab_size
        _lowerCAmelCase : Optional[Any] = do_stable_layer_norm
        _lowerCAmelCase : Any = use_weighted_layer_sum
        # The three conv layer descriptions must have equal length.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        _lowerCAmelCase : str = apply_spec_augment
        _lowerCAmelCase : Optional[Any] = mask_time_prob
        _lowerCAmelCase : Optional[int] = mask_time_length
        _lowerCAmelCase : List[str] = mask_time_min_masks
        _lowerCAmelCase : Optional[int] = mask_feature_prob
        _lowerCAmelCase : Optional[int] = mask_feature_length
        _lowerCAmelCase : List[str] = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        _lowerCAmelCase : Union[str, Any] = num_codevectors_per_group
        _lowerCAmelCase : str = num_codevector_groups
        _lowerCAmelCase : Optional[int] = contrastive_logits_temperature
        _lowerCAmelCase : Optional[int] = feat_quantizer_dropout
        _lowerCAmelCase : Optional[int] = num_negatives
        _lowerCAmelCase : Union[str, Any] = codevector_dim
        _lowerCAmelCase : Any = proj_codevector_dim
        _lowerCAmelCase : Optional[int] = diversity_loss_weight
        # ctc loss
        _lowerCAmelCase : Tuple = ctc_loss_reduction
        _lowerCAmelCase : Tuple = ctc_zero_infinity
        # adapter
        _lowerCAmelCase : List[Any] = add_adapter
        _lowerCAmelCase : List[str] = adapter_kernel_size
        _lowerCAmelCase : str = adapter_stride
        _lowerCAmelCase : List[str] = num_adapter_layers
        _lowerCAmelCase : str = output_hidden_size or hidden_size
        _lowerCAmelCase : Tuple = adapter_attn_dim
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        _lowerCAmelCase : str = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        _lowerCAmelCase : str = list(__a)
        _lowerCAmelCase : Union[str, Any] = list(__a)
        _lowerCAmelCase : List[str] = list(__a)
        _lowerCAmelCase : Tuple = xvector_output_dim

    @property
    def snake_case__ ( self):
        """Total stride of the convolutional feature extractor (product of per-layer strides)."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 36
| 0
|
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowerCamelCase = []
if isinstance(_lowerCamelCase , _lowerCamelCase ):
for v in tree.values():
shapes.extend(_fetch_dims(_lowerCamelCase ) )
elif isinstance(_lowerCamelCase , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(_lowerCamelCase ) )
elif isinstance(_lowerCamelCase , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError('Not supported' )
return shapes
@torch.jit.ignore
def lowerCamelCase_ ( flat_idx , dims ):
    """Convert a flat (row-major) index into a multi-dimensional index.

    Args:
        flat_idx: Flat index into a tensor of shape ``dims``.
        dims: The shape being indexed.

    Returns:
        A tuple of per-dimension indices that flattens back to ``flat_idx``
        under ``dims``.
    """
    # Bug fix: the original signature declared both parameters with the same
    # obfuscated name (a SyntaxError) and the body used unbound names;
    # restore meaningful parameter/local names.
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))
@torch.jit.ignore
def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : Any = None , UpperCamelCase__ : Union[str, Any] = None , ) -> Optional[int]:
    """Compute a minimal ordered list of slice tuples covering an inclusive
    [start, end] index range over the given dims.

    NOTE(review): the signature declares five parameters all named
    `UpperCamelCase__` (a SyntaxError) and the body references names that are
    never bound (`start`, `end`, `dims`, `start_edges`, `end_edges`, `slices`,
    `path_list`, `divergence_idx`, `_get_minimal_slice_set`, ...).  These are
    artifacts of an automated rename; restore from the upstream implementation
    before use.
    """
    def reduce_edge_list(UpperCamelCase__ : Dict ) -> None:
        # Propagate "at the edge" flags from the innermost dim outward.
        __lowerCamelCase = True
        for i in range(len(_lowerCamelCase ) ):
            __lowerCamelCase = -1 * (i + 1)
            l[reversed_idx] &= tally
            __lowerCamelCase = l[reversed_idx]
    if start_edges is None:
        __lowerCamelCase = [s == 0 for s in start]
        reduce_edge_list(_lowerCamelCase )
    if end_edges is None:
        __lowerCamelCase = [e == (d - 1) for e, d in zip(_lowerCamelCase , _lowerCamelCase )]
        reduce_edge_list(_lowerCamelCase )
    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(_lowerCamelCase ) == 0:
        return [()]
    elif len(_lowerCamelCase ) == 1:
        return [(slice(start[0] , end[0] + 1 ),)]
    __lowerCamelCase = []
    __lowerCamelCase = []
    # Dimensions common to start and end can be selected directly
    for s, e in zip(_lowerCamelCase , _lowerCamelCase ):
        if s == e:
            path_list.append(slice(_lowerCamelCase , s + 1 ) )
        else:
            break
    __lowerCamelCase = tuple(_lowerCamelCase )
    __lowerCamelCase = len(_lowerCamelCase )
    # start == end, and we're done
    if divergence_idx == len(_lowerCamelCase ):
        return [path]
    def upper() -> Tuple[Tuple[slice, ...], ...]:
        # Slices covering everything below `start` within its subtree.
        assert start_edges is not None
        assert end_edges is not None
        __lowerCamelCase = start[divergence_idx]
        return tuple(
            path + (slice(_lowerCamelCase , sdi + 1 ),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
    def lower() -> Tuple[Tuple[slice, ...], ...]:
        # Slices covering everything up to `end` within its subtree.
        assert start_edges is not None
        assert end_edges is not None
        __lowerCamelCase = end[divergence_idx]
        return tuple(
            path + (slice(_lowerCamelCase , edi + 1 ),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
        slices.extend(lower() )
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper() )
        slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper() )
        __lowerCamelCase = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
        slices.extend(lower() )
    return slices
@torch.jit.ignore
def lowerCamelCase_ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] ) -> Tuple:
    """Slice a flat [flat_start, flat_end) range out of a tensor's leading batch
    dims without materializing a full reshape.

    NOTE(review): all four parameters share one obfuscated name (a SyntaxError)
    and the body references unbound names (`t`, `no_batch_dims`, `flat_start`,
    `flat_end`, `_flat_idx_to_idx`, `_get_minimal_slice_set`, `slices`,
    `sliced_tensors`); restore from the upstream implementation.
    """
    __lowerCamelCase = t.shape[:no_batch_dims]
    __lowerCamelCase = list(_flat_idx_to_idx(_lowerCamelCase , _lowerCamelCase ) )
    # _get_minimal_slice_set is inclusive
    __lowerCamelCase = list(_flat_idx_to_idx(flat_end - 1 , _lowerCamelCase ) )
    # Get an ordered list of slices to perform
    __lowerCamelCase = _get_minimal_slice_set(
        _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , )
    __lowerCamelCase = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def lowerCamelCase_ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : str = False , UpperCamelCase__ : Tuple = None , UpperCamelCase__ : Any = False , ) -> Optional[Any]:
    """Apply a layer to its inputs in chunks along the flattened batch dims to
    bound peak memory, then reassemble the chunked outputs.

    NOTE(review): the signature declares seven parameters all named
    `UpperCamelCase__` (a SyntaxError) and the body references names that are
    never bound (`layer`, `inputs`, `chunk_size`, `no_batch_dims`, `low_mem`,
    `_out`, `_add_into_out`, ...); restore the original parameter names from
    the upstream implementation before use.
    """
    if not (len(_lowerCamelCase ) > 0):
        raise ValueError('Must provide at least one input' )
    # Broadcast all inputs to a common set of leading batch dims.
    __lowerCamelCase = [shape[:no_batch_dims] for shape in _fetch_dims(_lowerCamelCase )]
    __lowerCamelCase = tuple([max(_lowerCamelCase ) for s in zip(*_lowerCamelCase )] )
    def _prep_inputs(UpperCamelCase__ : str ) -> torch.Tensor:
        # Expand (and, unless low_mem, flatten) each tensor's batch dims.
        if not low_mem:
            if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
                __lowerCamelCase = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
            __lowerCamelCase = t.reshape(-1 , *t.shape[no_batch_dims:] )
        else:
            __lowerCamelCase = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
        return t
    __lowerCamelCase = tensor_tree_map(_prep_inputs , _lowerCamelCase )
    __lowerCamelCase = None
    if _out is not None:
        __lowerCamelCase = tensor_tree_map(lambda UpperCamelCase__ : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
    __lowerCamelCase = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    # Number of chunks = ceil(flat_batch_dim / chunk_size).
    __lowerCamelCase = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
    def _select_chunk(UpperCamelCase__ : List[str] ) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t
    __lowerCamelCase = 0
    __lowerCamelCase = prepped_outputs
    for _ in range(_lowerCamelCase ):
        # Chunk the input
        if not low_mem:
            __lowerCamelCase = _select_chunk
        else:
            __lowerCamelCase = partial(
                _chunk_slice , flat_start=_lowerCamelCase , flat_end=min(_lowerCamelCase , i + chunk_size ) , no_batch_dims=len(_lowerCamelCase ) , )
        __lowerCamelCase = tensor_tree_map(_lowerCamelCase , _lowerCamelCase )
        # Run the layer on the chunk
        __lowerCamelCase = layer(**_lowerCamelCase )
        # Allocate space for the output
        if out is None:
            __lowerCamelCase = tensor_tree_map(lambda UpperCamelCase__ : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , _lowerCamelCase )
        # Put the chunk in its pre-allocated space
        if isinstance(_lowerCamelCase , _lowerCamelCase ):
            def assign(UpperCamelCase__ : Any , UpperCamelCase__ : List[str] ) -> None:
                # Recursively copy (or accumulate) a dict-of-tensors chunk.
                for k, v in da.items():
                    if isinstance(_lowerCamelCase , _lowerCamelCase ):
                        assign(_lowerCamelCase , da[k] )
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += da[k]
                        else:
                            __lowerCamelCase = da[k]
            assign(_lowerCamelCase , _lowerCamelCase )
        elif isinstance(_lowerCamelCase , _lowerCamelCase ):
            for xa, xa in zip(_lowerCamelCase , _lowerCamelCase ):
                if _add_into_out:
                    xa[i : i + chunk_size] += xa
                else:
                    __lowerCamelCase = xa
        elif isinstance(_lowerCamelCase , torch.Tensor ):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                __lowerCamelCase = output_chunk
        else:
            raise ValueError('Not supported' )
        i += chunk_size
    # Restore the original (unflattened) batch dims on the output.
    __lowerCamelCase = tensor_tree_map(lambda UpperCamelCase__ : t.view(orig_batch_dims + t.shape[1:] ) , _lowerCamelCase )
    return out
class __lowerCAmelCase :
    """Caches and re-tunes the largest chunk size that a callable can run
    without failing, via binary search over powers of two up to a maximum.

    NOTE(review): local names in the method bodies are obfuscated — every
    assignment targets the name-mangled `__lowerCamelCase` and the right-hand
    sides reference unbound names (`max_chunk_size`, `candidates`,
    `consistent`, ...); restore from the upstream `ChunkSizeTuner` before use.
    """
    def __init__( self , lowerCamelCase__ = 512 , ) -> str:
        """Initialize with the largest chunk size that will ever be attempted."""
        __lowerCamelCase = max_chunk_size
        # Cached tuning result and the argument shapes it was computed for.
        __lowerCamelCase = None
        __lowerCamelCase = None
    def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> str:
        """Binary-search the largest chunk size (power of two) for which `fn` succeeds."""
        logging.info('Tuning chunk size...' )
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size
        # Candidate chunk sizes: powers of two up to max, plus min_chunk_size.
        __lowerCamelCase = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
        __lowerCamelCase = [c for c in candidates if c > min_chunk_size]
        __lowerCamelCase = [min_chunk_size] + candidates
        candidates[-1] += 4
        def test_chunk_size(lowerCamelCase__ ) -> bool:
            # A chunk size is viable if the call completes without RuntimeError.
            try:
                with torch.no_grad():
                    fn(*__a , chunk_size=__a )
                return True
            except RuntimeError:
                return False
        __lowerCamelCase = 0
        __lowerCamelCase = len(__a ) - 1
        while i > min_viable_chunk_size_index:
            __lowerCamelCase = test_chunk_size(candidates[i] )
            if not viable:
                # Too large: retry halfway down towards the last viable size.
                __lowerCamelCase = (min_viable_chunk_size_index + i) // 2
            else:
                # Viable: record it and probe halfway up towards the maximum.
                __lowerCamelCase = i
                __lowerCamelCase = (i + len(__a ) - 1) // 2
        return candidates[min_viable_chunk_size_index]
    def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]:
        """Return True if two cached argument descriptions are element-wise equal."""
        __lowerCamelCase = True
        for aa, aa in zip(__a , __a ):
            assert type(__a ) == type(__a )
            if isinstance(__a , (list, tuple) ):
                consistent &= self._compare_arg_caches(__a , __a )
            elif isinstance(__a , __a ):
                # Compare dicts by values sorted on key.
                __lowerCamelCase = [v for _, v in sorted(aa.items() , key=lambda lowerCamelCase__ : x[0] )]
                __lowerCamelCase = [v for _, v in sorted(aa.items() , key=lambda lowerCamelCase__ : x[0] )]
                consistent &= self._compare_arg_caches(__a , __a )
            else:
                consistent &= aa == aa
        return consistent
    def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) -> Optional[int]:
        """Return the cached chunk size, re-tuning if the argument shapes changed."""
        __lowerCamelCase = True
        # Describe args by tensor shapes (tensors) or raw values (everything else).
        __lowerCamelCase = tree_map(lambda lowerCamelCase__ : a.shape if isinstance(__a , torch.Tensor ) else a , __a , __a )
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data ) == len(__a )
            __lowerCamelCase = self._compare_arg_caches(self.cached_arg_data , __a )
        else:
            # Otherwise, we can reuse the precomputed value
            __lowerCamelCase = False
        if not consistent:
            __lowerCamelCase = self._determine_favorable_chunk_size(
                __a , __a , __a , )
            __lowerCamelCase = arg_data
        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
| 90
|
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    'The RoBERTa Model transformer with early exiting (DeeRoBERTa). ' , a , )
class UpperCAmelCase_ ( a):
    """DeeBERT backbone with RoBERTa embeddings swapped in (DeeRoBERTa)."""

    # NOTE(review): both attributes share the obfuscated name `lowerCamelCase__`,
    # so the second assignment overwrites the first; upstream these are
    # `config_class` and `base_model_prefix`.
    lowerCamelCase__ = RobertaConfig
    lowerCamelCase__ = 'roberta'

    def __init__( self, __a):
        """Build the DeeBERT model, replace its embeddings with RoBERTa's, and init weights."""
        super().__init__(__a)
        _lowerCAmelCase : Optional[Any] = RobertaEmbeddings(__a)
        self.init_weights()
@add_start_docstrings(
    'RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n    also takes care of multi-layer training. ' , a , )
class UpperCAmelCase_ ( a):
    """DeeRoBERTa with a sequence-classification head and highway (early-exit) losses."""

    # NOTE(review): both attributes share the obfuscated name `lowerCamelCase__`,
    # so the second assignment overwrites the first; upstream these are
    # `config_class` and `base_model_prefix`.
    lowerCamelCase__ = RobertaConfig
    lowerCamelCase__ = 'roberta'

    def __init__( self, __a):
        """Create the DeeRoBERTa backbone plus dropout and a linear classifier."""
        super().__init__(__a)
        _lowerCAmelCase : Optional[int] = config.num_labels
        _lowerCAmelCase : Optional[int] = config.num_hidden_layers
        # NOTE(review): `DeeRobertaModel` is not defined in this file (the class
        # above was renamed by the obfuscation) — confirm against upstream.
        _lowerCAmelCase : Optional[int] = DeeRobertaModel(__a)
        _lowerCAmelCase : Union[str, Any] = nn.Dropout(config.hidden_dropout_prob)
        _lowerCAmelCase : List[str] = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(__a)
    def snake_case__ ( self, __a=None, __a=None, __a=None, __a=None, __a=None, __a=None, __a=None, __a=-1, __a=False, ):
        """Forward pass with optional early exit: compute the final-layer
        classification loss plus one loss per highway exit.

        NOTE(review): local names inside this method are obfuscated — the
        assignments target `_lowerCAmelCase` while later code reads `outputs`,
        `logits`, `loss`, `highway_logits`, etc., which are unbound; restore
        from the upstream DeeRoberta implementation before use.
        """
        _lowerCAmelCase : Union[str, Any] = self.num_layers
        try:
            _lowerCAmelCase : List[Any] = self.roberta(
                __a, attention_mask=__a, token_type_ids=__a, position_ids=__a, head_mask=__a, inputs_embeds=__a, )
            _lowerCAmelCase : List[Any] = outputs[1]
            _lowerCAmelCase : Dict = self.dropout(__a)
            _lowerCAmelCase : Dict = self.classifier(__a)
            _lowerCAmelCase : Optional[Any] = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # An intermediate highway decided to exit early.
            _lowerCAmelCase : Tuple = e.message
            _lowerCAmelCase : Union[str, Any] = e.exit_layer
            _lowerCAmelCase : List[Any] = outputs[0]
        if not self.training:
            _lowerCAmelCase : int = entropy(__a)
            _lowerCAmelCase : List[Any] = []
            _lowerCAmelCase : str = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                _lowerCAmelCase : Optional[Any] = MSELoss()
                _lowerCAmelCase : int = loss_fct(logits.view(-1), labels.view(-1))
            else:
                _lowerCAmelCase : Optional[Any] = CrossEntropyLoss()
                _lowerCAmelCase : Optional[Any] = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            # work with highway exits
            _lowerCAmelCase : Optional[int] = []
            for highway_exit in outputs[-1]:
                _lowerCAmelCase : Any = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(__a)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    _lowerCAmelCase : List[str] = MSELoss()
                    _lowerCAmelCase : List[Any] = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    _lowerCAmelCase : Dict = CrossEntropyLoss()
                    _lowerCAmelCase : Optional[Any] = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(__a)
            if train_highway:
                _lowerCAmelCase : int = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                _lowerCAmelCase : Any = (loss,) + outputs
        if not self.training:
            _lowerCAmelCase : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                _lowerCAmelCase : Optional[Any] = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
| 36
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.