| code (string, 82-53.2k chars) | code_codestyle (int64, 0-721) | style_context (string, 91-41.9k chars) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU
from .scripts import test_script, test_sync, test_ops # isort: skip
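# A minimal sketch (the test class and bodies below are illustrative, not part
# of this module) of how the helpers imported above are typically used: each
# `require_*` decorator skips a test when its prerequisite is missing, and
# `slow` gates long-running tests behind the RUN_SLOW environment flag.
#
# import unittest
#
# class ExampleSuite(unittest.TestCase):
#     @require_cpu
#     def test_cpu_path(self):
#         self.assertTrue(True)  # placeholder body
#
#     @slow
#     @require_multi_gpu
#     def test_distributed_path(self):
#         self.assertTrue(True)  # placeholder body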
| 54
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_plbart"] = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
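# Sketch of what the lazy structure above provides: the heavy submodules are
# imported only when one of their symbols is first accessed (this assumes a
# transformers build where PLBart and its sentencepiece tokenizer are available):
#
# from transformers import PLBartConfig, PLBartTokenizer
# config = PLBartConfig()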
| 280
| 0
|
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
    "E": 12.70,
    "T": 9.06,
    "A": 8.17,
    "O": 7.51,
    "I": 6.97,
    "N": 6.75,
    "S": 6.33,
    "H": 6.09,
    "R": 5.99,
    "D": 4.25,
    "L": 4.03,
    "C": 2.78,
    "U": 2.76,
    "M": 2.41,
    "W": 2.36,
    "F": 2.23,
    "G": 2.02,
    "Y": 1.97,
    "P": 1.93,
    "B": 1.29,
    "V": 0.98,
    "K": 0.77,
    "J": 0.15,
    "X": 0.15,
    "Q": 0.10,
    "Z": 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def get_letter_count(message: str) -> dict:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter = {freq: [] for letter, freq in letter_to_freq.items()}
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score


if __name__ == "__main__":
    import doctest

    doctest.testmod()
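if __name__ == "__main__":
    # Illustrative usage (the sample sentence is arbitrary): the match score
    # counts how many of the six most and six least frequent letters line up
    # with typical English frequencies, so it ranges from 0 to 12.
    sample = "Machine learning lets computers find patterns in data."
    print(get_frequency_order(sample))
    print(english_freq_match_score(sample))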
| 662
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_blip': [
'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlipConfig',
'BlipTextConfig',
'BlipVisionConfig',
],
'processing_blip': ['BlipProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_blip"] = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_blip"] = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 662
| 1
|
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int) -> np.ndarray:
    """Build a (ksize x ksize) Gabor kernel for the given orientation and scale."""
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image into gray scale values
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # apply multiple kernels to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
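if __name__ == "__main__":
    # Quick sanity check of the kernel builder (argument order as in the
    # signature above: ksize, sigma, theta, lambd, gamma, psi). Even sizes
    # are bumped to the next odd number so the kernel has a center pixel.
    print(gabor_filter_kernel(10, 8, 0, 10, 0, 0).shape)  # (11, 11)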
| 26
|
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 648
| 0
|
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = "\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n"
_DESCRIPTION = "\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n"
_KWARGS_DESCRIPTION = "\nCalculates how good predictions are given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidate should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the candidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric(\"code_eval\")\n >>> test_cases = [\"assert add(2,3)==5\"]\n >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {'pass@1': 0.5, 'pass@2': 1.0}\n"
_WARNING = "\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can do this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n"
_LICENSE = "The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE."
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results


def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
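# Worked example of the unbiased pass@k estimator above: with n = 5 samples
# and c = 2 correct, pass@1 = 0.4 and pass@2 = 1 - C(3,2)/C(5,2) = 0.7; the
# product form in `estimator` matches the closed form 1 - C(n-c, k)/C(n, k).
#
# >>> estimate_pass_at_k(np.array([5]), np.array([2]), 1)
# array([0.4])
# >>> estimate_pass_at_k(np.array([5]), np.array([2]), 2)
# array([0.7])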
| 7
|
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"
    readme = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"

    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 7
| 1
|
def min_path_sum(grid: list) -> int:
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row


if __name__ == "__main__":
    import doctest

    doctest.testmod()
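if __name__ == "__main__":
    # Worked example (classic minimum-path-sum grid): moving only right/down,
    # the cheapest top-left -> bottom-right path is 1 + 3 + 1 + 1 + 1 = 7.
    print(min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))  # 7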
| 294
|
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
UpperCamelCase = namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source, target):
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string")),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
                        ),
                        "langs": Sequence(Value("string")),
                        "spans": Sequence(Value("string")),
                    }
                ),
                splits=[
                    {
                        "name": "train",
                        "num_bytes": 2351563,
                        "num_examples": 10000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 238418,
                        "num_examples": 1000,
                    },
                ],
                download_size=3940680,
                dataset_size=2589981,
            )
        }
    )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
| 269
| 0
|
import functools
from typing import Any


def word_break(string: str, words: list[str]) -> bool:
    """Return True if `string` can be segmented into words from `words`."""
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)
            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
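if __name__ == "__main__":
    # Examples: "applepenapple" splits as "apple" + "pen" + "apple", while the
    # classic counterexample "catsandog" cannot be fully segmented.
    print(word_break("applepenapple", ["apple", "pen"]))  # True
    print(word_break("catsandog", ["cats", "dog", "sand", "and", "cat"]))  # False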
| 707
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt2": 1024,
    "gpt2-medium": 1024,
    "gpt2-large": 1024,
    "gpt2-xl": 1024,
    "distilgpt2": 1024,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        self.add_bos_token = kwargs.pop("add_bos_token", False)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
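# A minimal usage sketch (this mirrors the standard `transformers` API; the
# first call downloads the public "gpt2" vocabulary files):
#
# >>> from transformers import GPT2TokenizerFast
# >>> tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
# >>> tokenizer("Hello world")["input_ids"]
# [15496, 995]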
| 555
| 0
|
import argparse
import os
import re

import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor

from accelerate import Accelerator


def extract_label(fname):
    # file names look like "<label>_<index>.jpg"
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]


class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}


def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)

    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None

    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]

    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}

    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset([file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id)

    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)

    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)

    # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True

    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)

    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)

    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]
        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)

    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    if args.with_tracking:
        accelerator.end_training()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 98
|
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Heun's (modified Euler) method: predict with Euler, then average the slopes."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y_pred = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_pred))
        )
        x += step_size
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
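if __name__ == "__main__":
    # Worked example: integrate y' = y with y(0) = 1 on [0, 1] using step 0.1.
    # Each Heun step multiplies y by (1 + h + h^2/2) = 1.105, so the result is
    # 1.105**10, approximately 2.7141, versus the exact value e ~ 2.7183.
    y = euler_modified(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)
    print(y[-1])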
| 106
| 0
|
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""",
}
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
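# A minimal sketch of using the config class above: the defaults correspond to
# EfficientNet-B7, and any field can be overridden at construction time.
#
# >>> config = EfficientNetConfig(image_size=224)
# >>> config.hidden_dim
# 2560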
| 720
|
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make a fake vertex if there is more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)


class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # concrete algorithms should override this method
    def _algorithm(self):
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow


class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count

        # push some substance to the graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's a neighbour and the current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1


if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
    print(f"maximum flow is {maximum_flow}")
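# Hand check of the demo above: capacities are 0->1 = 7, 1->2 = 6, 2->3 = 8
# (the 9 on edge 3->0 points away from the sink), so the single augmenting
# path 0 -> 1 -> 2 -> 3 is bottlenecked at 6 and the script prints
# "maximum flow is 6".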
| 560
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''],
'''tokenization_canine''': ['''CanineTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_canine"] = [
'''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CanineForMultipleChoice''',
'''CanineForQuestionAnswering''',
'''CanineForSequenceClassification''',
'''CanineForTokenClassification''',
'''CanineLayer''',
'''CanineModel''',
'''CaninePreTrainedModel''',
'''load_tf_weights_in_canine''',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 593
|
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
__A = get_logger(__name__)
def __a ( lowerCAmelCase_ : List[str] ,lowerCAmelCase_ : str ,lowerCAmelCase_ : List[Any] ,lowerCAmelCase_ : Tuple ,lowerCAmelCase_ : str=0 ) -> Tuple:
'''simple docstring'''
os.makedirs(lowerCAmelCase_ ,exist_ok=lowerCAmelCase_ )
with FSDP.state_dict_type(
lowerCAmelCase_ ,fsdp_plugin.state_dict_type ,fsdp_plugin.state_dict_config ,fsdp_plugin.optim_state_dict_config ):
UpperCAmelCase_= model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
UpperCAmelCase_= F"""{MODEL_NAME}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}.bin"""
UpperCAmelCase_= os.path.join(lowerCAmelCase_ ,lowerCAmelCase_ )
if accelerator.process_index == 0:
logger.info(F"""Saving model to {output_model_file}""" )
torch.save(lowerCAmelCase_ ,lowerCAmelCase_ )
logger.info(F"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
UpperCAmelCase_= (
F"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else F"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
UpperCAmelCase_= os.path.join(lowerCAmelCase_ ,lowerCAmelCase_ )
logger.info(F"""Saving model to {output_model_file}""" )
torch.save(lowerCAmelCase_ ,lowerCAmelCase_ )
logger.info(F"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
UpperCAmelCase_= os.path.join(lowerCAmelCase_ ,F"""{MODEL_NAME}_{model_index}""" )
os.makedirs(lowerCAmelCase_ ,exist_ok=lowerCAmelCase_ )
logger.info(F"""Saving model to {ckpt_dir}""" )
UpperCAmelCase_= {"""model""": state_dict}
dist_cp.save_state_dict(
state_dict=lowerCAmelCase_ ,storage_writer=dist_cp.FileSystemWriter(lowerCAmelCase_ ) ,planner=DefaultSavePlanner() ,)
logger.info(F"""Model saved to {ckpt_dir}""" )
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # The check below should work, but it currently does not (mostly a PyTorch issue);
            # in the meantime it is disabled at the cost of excess memory usage.
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
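# Usage sketch (illustrative, not part of this file): inside accelerate,
# Accelerator.save_state / load_state call these helpers once per prepared
# model/optimizer pair, e.g.
#   for i, model in enumerate(models):
#       save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=i)
#   for i, opt in enumerate(optimizers):
#       load_fsdp_optimizer(fsdp_plugin, accelerator, opt, models[i], input_dir, optimizer_index=i)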
| 593
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_layoutlmv3": [
        "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LayoutLMv3Config",
        "LayoutLMv3OnnxConfig",
    ],
    "processing_layoutlmv3": ["LayoutLMv3Processor"],
    "tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["LayoutLMv3TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv3"] = [
"LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
"LayoutLMv3ForQuestionAnswering",
"LayoutLMv3ForSequenceClassification",
"LayoutLMv3ForTokenClassification",
"LayoutLMv3Model",
"LayoutLMv3PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
"TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLayoutLMv3ForQuestionAnswering",
"TFLayoutLMv3ForSequenceClassification",
"TFLayoutLMv3ForTokenClassification",
"TFLayoutLMv3Model",
"TFLayoutLMv3PreTrainedModel",
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["LayoutLMv3FeatureExtractor"]
_snake_case = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
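    # Note: binding a _LazyModule into sys.modules defers the heavy torch/TF imports
    # declared above until an attribute such as LayoutLMv3Model is first accessed,
    # which keeps a bare `import transformers` cheap.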
| 658
|
import requests
from bs4 import BeautifulSoup
def get_citation(base_url: str, params: dict) -> str:
    """Return the citation text reported by Google Scholar for a paper."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
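# The third anchor in the gs_fl footer of the first search result is typically
# the "Cited by N" link, so the returned text usually reads like "Cited by 123".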
if __name__ == "__main__":
    params = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 658
| 1
|
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark."""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(
    df: "pyspark.sql.DataFrame",
    partition_order: List[int],
):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        partition_order=None,
    ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)
    def __iter__(self):
yield from self.generate_examples_fn()
    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)
@property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        cache_dir: str = None,
        working_dir: str = None,
        **config_kwargs,
    ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )
    def _validate_cache_dir(self):
        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]
if self._spark.conf.get('spark.master' , '').startswith('local'):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return
raise ValueError(
'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir')
    def _info(self):
return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
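        # Worked example (illustrative): with max_shard_size = 500 MB, a sampled
        # average of 1 kB per row and 10**9 rows, approx_total_size is ~1 TB, so
        # the dataframe is repartitioned into ~2000 partitions of one shard each.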
    def _prepare_split_single(
        self,
        fpath: str,
        file_format: str,
        max_shard_size: int,
    ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def SCREAMING_SNAKE_CASE__ ( self , a , a = "arrow" , a = None , a = None , **a , ) -> List[str]:
self._validate_cache_dir()
SCREAMING_SNAKE_CASE = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
self._repartition_df_if_needed(a)
SCREAMING_SNAKE_CASE = not is_remote_filesystem(self._fs)
SCREAMING_SNAKE_CASE = os.path.join if is_local else posixpath.join
SCREAMING_SNAKE_CASE = '-TTTTT-SSSSS-of-NNNNN'
SCREAMING_SNAKE_CASE = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
SCREAMING_SNAKE_CASE = path_join(self._output_dir , a)
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
for task_id, content in self._prepare_split_single(a , a , a):
(
(
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) ,
) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards))
all_shard_lengths.extend(a)
SCREAMING_SNAKE_CASE = total_num_examples
SCREAMING_SNAKE_CASE = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''')
if total_shards > 1:
SCREAMING_SNAKE_CASE = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
SCREAMING_SNAKE_CASE = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
a , a , a , ):
rename(
a , fpath.replace('SSSSS' , f'''{shard_id:05d}''').replace('TTTTT' , f'''{task_id:05d}''') , fpath.replace('TTTTT-SSSSS' , f'''{global_shard_id:05d}''').replace('NNNNN' , f'''{total_shards:05d}''') , )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = 0
for i in range(len(a)):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = task_id_and_num_shards[i]
for shard_id in range(a):
args.append([task_id, shard_id, global_shard_id])
global_shard_id += 1
self._spark.sparkContext.parallelize(a , len(a)).map(lambda a: _rename_shard(*a)).collect()
else:
# don't use any pattern
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('SSSSS' , f'''{shard_id:05d}''').replace('TTTTT' , f'''{task_id:05d}''') , fpath.replace(a , '') , )
    def _get_examples_iterable_for_split(self, split_generator: "datasets.SplitGenerator") -> SparkExamplesIterable:
return SparkExamplesIterable(self.df)
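# Usage sketch (illustrative): this builder backs Dataset.from_spark, e.g.
#   ds = datasets.Dataset.from_spark(spark_df, cache_dir="/dbfs/cache")
# which materializes the dataframe into Arrow shards and then loads the split.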
| 73
|
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Check that no already-placed queen attacks square (row, column)."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Place queens row by row, backtracking when no column is safe."""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
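# For n = 8 the search prints each solution as an 8x8 grid of 'Q'/'.' characters;
# there are 92 such placements, so `solution` ends up holding 92 boards.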
| 73
| 1
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 109
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 109
| 1
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 416
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
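        # The model returns the mean cross-entropy over the label tokens, so
        # -(labels.shape[-1] * loss) recovers the summed log-likelihood that the
        # original Mesh TensorFlow implementation reports as its score.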
| 316
| 0
|
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a root of ``func`` (an expression in x) near the initial guess ``a``."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
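# Each iteration applies the Newton update x_{n+1} = x_n - f(x_n) / f'(x_n);
# Decimal keeps the division well-behaved as f(x) approaches zero.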
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
print(f'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
    # Find root of log(x) - 1 = 0 (i.e. x = e)
print(f'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
# Exponential Roots
print(f'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
| 718
|
'''simple docstring'''
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input, drop_prob: float = 0.0, training: bool = False):
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
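# Example (illustrative): with drop_prob=0.2 at train time, a sample's whole
# residual branch is zeroed with probability 0.2, and survivors are scaled by
# 1/0.8 so the expected activation is unchanged.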
class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample, applied in the main path of residual blocks."""

    def __init__(self, drop_prob=None):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states):
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self):
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """Construct patch embeddings with a strided convolution."""

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """Group normalization with a single group, for tensors of shape [B, C, H, W]."""

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)
class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        return self.pool(hidden_states) - hidden_states
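# Note: returning pool(x) - x makes the block a pure token-mixing residual; the
# enclosing PoolFormerLayer adds x back through its own residual connection, as
# in the PoolFormer formulation.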
class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))
        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
_UpperCamelCase : Any =R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_UpperCamelCase : Tuple =R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n"
@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )
class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])

        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values=None,
        labels=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]

        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
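# Usage sketch (illustrative):
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
#   model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
#   logits = model(**processor(images=image, return_tensors="pt")).logits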
| 575
| 0
|
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")
class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 75
|
def circle_sort(collection: list) -> list:
    """Sort ``collection`` in place by repeatedly comparing and swapping elements
    from opposite ends of ever-smaller halves (circle sort)."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection
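# Example trace (illustrative): on [3, 1, 2] the first pass swaps 3 and 2 across
# the ends, then recurses on the left half to swap 2 and 1; a final pass with no
# swaps terminates the loop, leaving [1, 2, 3].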
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(circle_sort(unsorted))
| 188
| 0
|
"""simple docstring"""
from manim import *
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : str = Rectangle(height=0.5 , width=0.5 )
UpperCAmelCase : Any = Rectangle(height=0.25 , width=0.25 )
UpperCAmelCase : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCAmelCase : Union[str, Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase : Optional[Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase : List[str] = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
UpperCAmelCase : List[str] = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
UpperCAmelCase : Union[str, Any] = VGroup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
UpperCAmelCase : Any = Text("""CPU""" , font_size=24 )
UpperCAmelCase : Optional[Any] = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = [mem.copy() for i in range(4 )]
UpperCAmelCase : List[str] = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
UpperCAmelCase : str = Text("""GPU""" , font_size=24 )
UpperCAmelCase : Optional[Any] = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
gpu.move_to([-1, -1, 0] )
self.add(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Union[str, Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase : Union[str, Any] = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
UpperCAmelCase : str = Text("""Model""" , font_size=24 )
UpperCAmelCase : Union[str, Any] = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
model.move_to([3, -1.0, 0] )
self.add(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[int] = []
UpperCAmelCase : Dict = []
UpperCAmelCase : Any = []
for i, rect in enumerate(_SCREAMING_SNAKE_CASE ):
rect.set_stroke(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Dict = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_SCREAMING_SNAKE_CASE , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_SCREAMING_SNAKE_CASE )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=_SCREAMING_SNAKE_CASE , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=_SCREAMING_SNAKE_CASE , buff=0.0 )
self.add(_SCREAMING_SNAKE_CASE )
model_cpu_arr.append(_SCREAMING_SNAKE_CASE )
self.add(*_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[str] = [mem.copy() for i in range(6 )]
UpperCAmelCase : Any = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
UpperCAmelCase : Optional[Any] = Text("""Loaded Checkpoint""" , font_size=24 )
UpperCAmelCase : Union[str, Any] = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
checkpoint.move_to([3, 0.5, 0] )
self.add(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[str] = []
UpperCAmelCase : Optional[int] = []
for i, rect in enumerate(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase : Any = fill.copy().set_fill(_SCREAMING_SNAKE_CASE , opacity=0.7 )
target.move_to(_SCREAMING_SNAKE_CASE )
ckpt_arr.append(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : str = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(_SCREAMING_SNAKE_CASE )
self.add(*_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCAmelCase : int = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(_SCREAMING_SNAKE_CASE , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[int] = MarkupText(
F"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device." , font_size=24 , )
step_a.move_to([2, 2, 0] )
UpperCAmelCase : List[Any] = [meta_mem.copy() for i in range(6 )]
UpperCAmelCase : str = [meta_mem.copy() for i in range(6 )]
UpperCAmelCase : Optional[int] = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
UpperCAmelCase : Union[str, Any] = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
UpperCAmelCase : Dict = VGroup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
UpperCAmelCase : List[str] = Text("""Disk""" , font_size=24 )
UpperCAmelCase : Dict = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(_SCREAMING_SNAKE_CASE , run_time=3 ) , Write(_SCREAMING_SNAKE_CASE , run_time=1 ) , Create(_SCREAMING_SNAKE_CASE , run_time=1 ) )
UpperCAmelCase : Tuple = []
for i, rect in enumerate(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase : List[str] = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(_SCREAMING_SNAKE_CASE , run_time=1.5 ) )
self.play(*_SCREAMING_SNAKE_CASE )
self.play(FadeOut(_SCREAMING_SNAKE_CASE ) )
UpperCAmelCase : str = MarkupText(F"Then, the checkpoint is removed from memory\nthrough garbage collection." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_SCREAMING_SNAKE_CASE , run_time=3 ) )
self.play(
FadeOut(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE ) , )
self.wait()
| 359
|
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Dict = Dataset.from_dict(
{
"""id""": ["""0""", """1"""],
"""text""": ["""foo""", """bar"""],
"""title""": ["""Foo""", """Bar"""],
"""embeddings""": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("""embeddings""" , string_factory="""Flat""" , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = self.get_dummy_dataset()
UpperCAmelCase : List[Any] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch("""transformers.models.rag.retrieval_rag.load_dataset""" ) as mock_load_dataset:
UpperCAmelCase : Tuple = dataset
UpperCAmelCase : List[str] = RagRetriever(
_SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
UpperCAmelCase : List[Any] = self.get_dummy_dataset()
UpperCAmelCase : Any = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="""custom""" , )
if from_disk:
UpperCAmelCase : str = os.path.join(self.tmpdirname , """dataset""" )
UpperCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , """index.faiss""" )
dataset.get_index("""embeddings""" ).save(os.path.join(self.tmpdirname , """index.faiss""" ) )
dataset.drop_index("""embeddings""" )
dataset.save_to_disk(os.path.join(self.tmpdirname , """dataset""" ) )
del dataset
UpperCAmelCase : List[Any] = RagRetriever(
_SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
UpperCAmelCase : Optional[Any] = RagRetriever(
_SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , _SCREAMING_SNAKE_CASE ) , )
return retriever
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : List[str] = Dataset.from_dict(
{
"""id""": ["""0""", """1"""],
"""text""": ["""foo""", """bar"""],
"""title""": ["""Foo""", """Bar"""],
"""embeddings""": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index("""embeddings""" , string_factory="""Flat""" , metric_type=faiss.METRIC_INNER_PRODUCT )
UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , """hf_bert_base.hnswSQ8_correct_phi_128.c_index""" )
dataset.save_faiss_index("""embeddings""" , index_file_name + """.index.dpr""" )
pickle.dump(dataset["""id"""] , open(index_file_name + """.index_meta.dpr""" , """wb""" ) )
UpperCAmelCase : Dict = os.path.join(self.tmpdirname , """psgs_w100.tsv.pkl""" )
UpperCAmelCase : Any = {sample["""id"""]: [sample["""text"""], sample["""title"""]] for sample in dataset}
pickle.dump(_SCREAMING_SNAKE_CASE , open(_SCREAMING_SNAKE_CASE , """wb""" ) )
UpperCAmelCase : Dict = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="""legacy""" , index_path=self.tmpdirname , )
UpperCAmelCase : str = RagRetriever(
_SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : int = 1
UpperCAmelCase : List[Any] = self.get_dummy_canonical_hf_index_retriever()
UpperCAmelCase : List[str] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = retriever.retrieve(_SCREAMING_SNAKE_CASE , n_docs=_SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] )
self.assertEqual(len(doc_dicts[0]["""id"""] ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : List[Any] = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("""transformers.models.rag.retrieval_rag.load_dataset""" ) as mock_load_dataset:
UpperCAmelCase : Optional[int] = self.get_dummy_dataset()
retriever.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Tuple = RagRetriever.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase : Optional[Any] = retriever.retrieve(_SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : Tuple = 1
UpperCAmelCase : Dict = self.get_dummy_custom_hf_index_retriever(from_disk=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = retriever.retrieve(_SCREAMING_SNAKE_CASE , n_docs=_SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] )
self.assertEqual(len(doc_dicts[0]["""id"""] ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : Optional[int] = self.get_dummy_custom_hf_index_retriever(from_disk=_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Union[str, Any] = RagRetriever.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[int] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase : List[str] = retriever.retrieve(_SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = 1
UpperCAmelCase : Optional[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Dict = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : str = retriever.retrieve(_SCREAMING_SNAKE_CASE , n_docs=_SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] )
self.assertEqual(len(doc_dicts[0]["""id"""] ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Optional[int] = self.get_dummy_custom_hf_index_retriever(from_disk=_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = RagRetriever.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[str] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase : int = retriever.retrieve(_SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : Tuple = 1
UpperCAmelCase : List[Any] = self.get_dummy_legacy_index_retriever()
UpperCAmelCase : Any = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = retriever.retrieve(_SCREAMING_SNAKE_CASE , n_docs=_SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["""text""", """title"""] )
self.assertEqual(len(doc_dicts[0]["""text"""] ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]["""text"""][0] , """bar""" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["""text"""][0] , """foo""" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : str = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = RagRetriever.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase : Tuple = retriever.retrieve(_SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
import torch
UpperCAmelCase : Dict = 1
UpperCAmelCase : List[Any] = self.get_dummy_canonical_hf_index_retriever()
UpperCAmelCase : List[str] = [[5, 7], [10, 11]]
UpperCAmelCase : List[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase : Union[str, Any] = retriever(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=_SCREAMING_SNAKE_CASE )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = (
out["""context_input_ids"""],
out["""context_attention_mask"""],
out["""retrieved_doc_embeds"""],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
UpperCAmelCase : Any = retriever(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" , )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = ( # noqa: F841
out["""context_input_ids"""],
out["""context_attention_mask"""],
out["""retrieved_doc_embeds"""],
out["""doc_ids"""],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase : int = self.get_dpr_ctx_encoder_tokenizer()
UpperCAmelCase : Any = 1
UpperCAmelCase : List[str] = self.get_dummy_custom_hf_index_retriever(from_disk=_SCREAMING_SNAKE_CASE )
retriever.set_ctx_encoder_tokenizer(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Dict = [[5, 7], [10, 11]]
UpperCAmelCase : Optional[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase : Tuple = retriever(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=_SCREAMING_SNAKE_CASE )
self.assertEqual(
len(_SCREAMING_SNAKE_CASE ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ("""tokenized_doc_ids""", """tokenized_doc_attention_mask""") ) , _SCREAMING_SNAKE_CASE ) # check for doc token related keys in dictionary.
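# --- Illustrative sketch (added; not part of the original test suite) ---
# The fixtures above reduce to one pattern: store one embedding per passage in
# a `datasets.Dataset`, add a FAISS inner-product index over the "embeddings"
# column, and query it with a float32 vector. Field names and sizes below are
# assumptions chosen to mirror the dummy dataset used by the tests.
def _demo_inner_product_retrieval():
    vector_size = 8
    ds = Dataset.from_dict(
        {
            "id": ["0", "1"],
            "text": ["foo", "bar"],
            "embeddings": [np.ones(vector_size), 2 * np.ones(vector_size)],
        }
    )
    ds.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
    query = np.ones(vector_size, dtype=np.float32)
    scores, examples = ds.get_nearest_examples("embeddings", query, k=1)
    return examples["text"]  # the all-twos passage wins on inner product -> ["bar"]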
| 359
| 1
|
def catalan(number: int) -> int:
    """Return the `number`-th element of the Catalan sequence 1, 1, 2, 5, 14, ... (1-indexed).

    >>> catalan(5)
    14
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
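# Illustrative cross-check (added): the loop above implements the recurrence
# C(n) = C(n-1) * (4n - 2) / (n + 1), which stays in integers because the
# running product is always divisible by i + 1 at step i. It can be validated
# against the closed form C(n) = binom(2n, n) / (n + 1) from the standard
# library:
#
#     import math
#     assert all(catalan(n + 1) == math.comb(2 * n, n) // (n + 1) for n in range(10))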
| 557
|
import numpy as np


class Cell:
    """A cell in the grid world, storing its position, parent and A* costs."""

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neighbours(self, cell):
        """Return the neighbouring cells of `cell` that lie inside the grid."""
        neighbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    """A* search from `start` to `goal`; returns the path as a list of positions."""
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neighbours(current):
            if any(c == n for c in _closed):
                continue  # already expanded
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2  # squared Euclidean distance heuristic
            n.f = n.h + n.g
            if any(c == n and c.f < n.f for c in _open):
                continue  # a better candidate for this cell is already queued
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
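# Illustrative note (added): `astar` above scores neighbours with the *squared*
# Euclidean distance, which keeps everything in integers but penalises long
# diagonals quadratically. Swapping in a Manhattan heuristic is a one-line
# change inside the neighbour loop (an alternative, not the original code):
#
#     n.h = abs(x2 - x1) + abs(y2 - y1)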
| 669
| 0
|
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
a_ = logging.getLogger(__name__)
def a__ ( __lowercase , __lowercase ) -> Union[str, Any]:
return (preds == labels).mean()
@dataclass
class snake_case :
__UpperCamelCase = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
__UpperCamelCase = field(
default=lowercase__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'})
__UpperCamelCase = field(
default=lowercase__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
__UpperCamelCase = field(
default=lowercase__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class snake_case :
__UpperCamelCase = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys())})
__UpperCamelCase = field(metadata={'help': 'Should contain the data files for the task.'})
__UpperCamelCase = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__UpperCamelCase = field(
default=lowercase__ , metadata={'help': 'Overwrite the cached training and evaluation sets'})
def a__ ( ) -> Optional[Any]:
_A = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_A = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , _A )
# Set seed
set_seed(training_args.seed )
try:
_A = processors[data_args.task_name]()
_A = processor.get_labels()
_A = len(_A )
except KeyError:
raise ValueError("Task not found: %s" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_A = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_A , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
_A = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_A = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_A , cache_dir=model_args.cache_dir , )
# Get datasets
_A = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=_A , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
_A = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=_A , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(__lowercase ) -> Dict:
_A = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(_A , p.label_ids )}
# Data collator
_A = DataCollatorWithPadding(_A , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
_A = Trainer(
model=_A , args=_A , train_dataset=_A , eval_dataset=_A , compute_metrics=_A , data_collator=_A , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_A = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_A = trainer.evaluate()
_A = os.path.join(training_args.output_dir , "eval_results.txt" )
if trainer.is_world_master():
with open(_A , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(" %s = %s" , _A , _A )
writer.write("%s = %s\n" % (key, value) )
results.update(_A )
return results
def a__ ( __lowercase ) -> str:
main()
if __name__ == "__main__":
main()
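# Illustrative invocation (added; the script file name, model, task and paths
# are placeholders, not taken from the original source):
#
#   python run_multiple_choice.py \
#     --model_name_or_path bert-base-uncased \
#     --task_name swag \
#     --data_dir ./data/swag \
#     --output_dir ./output \
#     --do_train --do_eval \
#     --max_seq_length 128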
| 705
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
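# Illustrative sketch (added; not part of the conversion logic): the slicing
# above splits a fused (768, d) in-projection matrix into three equal (256, d)
# blocks for the query, key and value projections.
_fused_demo = torch.zeros(768, 4)  # stand-in for an in_proj_weight tensor
_q, _k, _v = _fused_demo[:256], _fused_demo[256:512], _fused_demo[-256:]
assert _q.shape == _k.shape == _v.shape == (256, 4)
del _fused_demo, _q, _k, _v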
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """Copy/paste/tweak the original model's weights to our Conditional DETR structure."""
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                # target keys were dropped by the source dump; restored to match
                # the analogous DETR conversion scripts (assumption)
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr"):]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
a_ = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
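# Illustrative invocation (added; the script file name and dump path are
# placeholders):
#
#   python convert_conditional_detr_checkpoint.py \
#     --model_name conditional_detr_resnet50 \
#     --pytorch_dump_folder_path ./conditional_detr_resnet50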
| 621
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""],
"""tokenization_roberta""": ["""RobertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
"""ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaForCausalLM""",
"""RobertaForMaskedLM""",
"""RobertaForMultipleChoice""",
"""RobertaForQuestionAnswering""",
"""RobertaForSequenceClassification""",
"""RobertaForTokenClassification""",
"""RobertaModel""",
"""RobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
"""TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaForCausalLM""",
"""TFRobertaForMaskedLM""",
"""TFRobertaForMultipleChoice""",
"""TFRobertaForQuestionAnswering""",
"""TFRobertaForSequenceClassification""",
"""TFRobertaForTokenClassification""",
"""TFRobertaMainLayer""",
"""TFRobertaModel""",
"""TFRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
"""FlaxRobertaForCausalLM""",
"""FlaxRobertaForMaskedLM""",
"""FlaxRobertaForMultipleChoice""",
"""FlaxRobertaForQuestionAnswering""",
"""FlaxRobertaForSequenceClassification""",
"""FlaxRobertaForTokenClassification""",
"""FlaxRobertaModel""",
"""FlaxRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
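# Illustrative sketch (added): the `_LazyModule` pattern above makes importing
# the package cheap; heavy submodules are imported only on first attribute
# access. A minimal stand-alone version of the same idea (names below are
# assumptions, not the transformers implementation):
#
#     import importlib, types
#
#     class LazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._import_structure = import_structure
#
#         def __getattr__(self, attr):
#             for submodule, names in self._import_structure.items():
#                 if attr in names:
#                     mod = importlib.import_module(f"{self.__name__}.{submodule}")
#                     return getattr(mod, attr)
#             raise AttributeError(attr)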
| 33
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_blip''': [
'''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlipConfig''',
'''BlipTextConfig''',
'''BlipVisionConfig''',
],
'''processing_blip''': ['''BlipProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
'''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlipModel''',
'''BlipPreTrainedModel''',
'''BlipForConditionalGeneration''',
'''BlipForQuestionAnswering''',
'''BlipVisionModel''',
'''BlipTextModel''',
'''BlipForImageTextRetrieval''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
'''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBlipModel''',
'''TFBlipPreTrainedModel''',
'''TFBlipForConditionalGeneration''',
'''TFBlipForQuestionAnswering''',
'''TFBlipVisionModel''',
'''TFBlipTextModel''',
'''TFBlipForImageTextRetrieval''',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 272
| 0
|
def odd_even_sort(input_list: list) -> list:
    """Sort the list in place using odd-even transposition (brick) sort."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False

        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
if __name__ == "__main__":
print('''Enter list to be sorted''')
__lowercase = [int(x) for x in input().split()]
# inputing elements of the list in one line
__lowercase = odd_even_sort(input_list)
print('''The sorted list is''')
print(sorted_list)
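# Illustrative usage (added): brick sort alternates compare-exchange passes
# over even and odd index pairs until a full double pass makes no swaps;
# O(n^2) in the worst case, but the passes within each phase are independent
# and parallelise well.
#
#     >>> odd_even_sort([5, 3, 1, 4, 2])
#     [1, 2, 3, 4, 5]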
| 452
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                num_hidden_layers=5,
                num_attention_heads=4,
                image_size=32,
                intermediate_size=37,
                patch_size=1,
            )
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }

        return components

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)


@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            input_image,
            "anime turtle",
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
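# Illustrative note (added): the integration tests above combine
# `enable_attention_slicing()` (compute attention in chunks) with
# `enable_sequential_cpu_offload()` (keep modules on CPU until they are
# needed), which is what lets the last test assert a < 7 GB peak via
# `torch.cuda.max_memory_allocated()`, at the cost of slower inference.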
| 452
| 1
|
"""simple docstring"""
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class A__:
def __init__( self : int , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict=13 , __SCREAMING_SNAKE_CASE : str=7 , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Dict=99 , __SCREAMING_SNAKE_CASE : Optional[Any]=64 , __SCREAMING_SNAKE_CASE : Dict=5 , __SCREAMING_SNAKE_CASE : Tuple=4 , __SCREAMING_SNAKE_CASE : Union[str, Any]=64 , __SCREAMING_SNAKE_CASE : Tuple="gelu" , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : List[Any]=0.1 , __SCREAMING_SNAKE_CASE : str=5_12 , __SCREAMING_SNAKE_CASE : Optional[int]=16 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : int=0.02 , __SCREAMING_SNAKE_CASE : Tuple=3 , __SCREAMING_SNAKE_CASE : Union[str, Any]=4 , __SCREAMING_SNAKE_CASE : int=None , ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = seq_length
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_input_mask
__SCREAMING_SNAKE_CASE = use_token_type_ids
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = num_labels
__SCREAMING_SNAKE_CASE = num_choices
__SCREAMING_SNAKE_CASE = scope
    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mpnet_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 482
|
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    # special case for DeiTForImageClassificationWithTeacher model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto"
        )
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
| 345
| 0
|
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
__UpperCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__UpperCamelCase : Dict = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 641
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 641
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """
    Implements a batch, differentiable, standard pinhole camera
    """

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        """
        :return: coords of shape (width * height, 2)
        """
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """
        Creates a new camera for the resized view assuming the aspect ratio does not change.
        """
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
            shape=self.shape,
        )
def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
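

# Illustrative usage sketch, not part of the original snippet: builds the
# 20-view panning camera rig at 64x64 resolution and inspects the ray bundle.
# Only the classes defined above are assumed.
if __name__ == "__main__":
    cameras = create_pan_cameras(64)
    rays = cameras.camera_rays  # (origin, direction) per pixel, flattened over views
    print(rays.shape)  # expected: torch.Size([1, 20 * 64 * 64, 2, 3])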
| 259
|
'''simple docstring'''
def catalan(number: int) -> int:
    """
    Returns the n-th Catalan number via the recurrence C(n) = C(n-1) * (4n - 2) / (n + 1).
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)

    current_number = 1

    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1

    return current_number
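

# Illustrative check, not part of the original snippet: with this 1-indexed
# convention catalan(n) equals the classic C(n-1), so the sequence starts
# 1, 1, 2, 5, 14, 42, ...
assert [catalan(n) for n in range(1, 7)] == [1, 1, 2, 5, 14, 42]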
if __name__ == "__main__":
import doctest
doctest.testmod()
| 407
| 0
|
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """
    Finds the median of the merged contents of two (possibly unsorted) arrays.
    """
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
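

# Illustrative example, not part of the original snippet: the merged array of
# [1, 3] and [2, 4] is [1, 2, 3, 4], whose median is (2 + 3) / 2 = 2.5.
assert median_of_two_arrays([1.0, 3.0], [2.0, 4.0]) == 2.5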
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
| 451
|
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
lowercase__ : List[str] = "3"
print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
print("Torch version:", None)
try:
import transformers
print("transformers version:", transformers.__version__)
except ImportError:
print("transformers version:", None)
| 451
| 1
|
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 12_80,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 2_24,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 12_80,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 2_40,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 14_08,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 2_60,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 15_36,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 3_00,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 17_92,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 3_80,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 20_48,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 4_56,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 23_04,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 5_28,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 25_60,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 6_00,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
# here we list all keys to be renamed (original name on the left, our name on the right)
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight'''))
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight'''))
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias'''))
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean'''))
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var'''))
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight'''))
rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight'''))
rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias'''))
rename_keys.append(
(F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean'''))
rename_keys.append(
(F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var'''))
rename_keys.append(
(F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight'''))
rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight'''))
rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias'''))
rename_keys.append(
(F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean'''))
rename_keys.append(
(F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var'''))
rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight'''))
rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias'''))
rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight'''))
rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias'''))
rename_keys.append(
(F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight'''))
rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight'''))
rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias'''))
rename_keys.append(
(F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean'''))
rename_keys.append(
(F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var'''))
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight'''))
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight'''))
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias'''))
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean'''))
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var'''))
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    # Load the original TensorFlow model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 274
|
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: list = [3, 3, 5, 3, 5, 5, 3],
        in_channels: list = [32, 16, 24, 40, 80, 112, 192],
        out_channels: list = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: list = [],
        strides: list = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: list = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: list = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
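

if __name__ == "__main__":
    # Illustrative check, not part of the original file: the default (b7-sized)
    # configuration derives num_hidden_layers from the block repeat counts.
    config = EfficientNetConfig()
    assert config.num_hidden_layers == sum([1, 2, 2, 3, 3, 4, 1]) * 4  # == 64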
| 274
| 1
|
"""simple docstring"""
from __future__ import annotations
def shear_stress(stress: float, tangential_force: float, area: float) -> tuple[str, float]:
if (stress, tangential_force, area).count(0) != 1:
raise ValueError('''You cannot supply more or less than 2 values''')
elif stress < 0:
raise ValueError('''Stress cannot be negative''')
elif tangential_force < 0:
raise ValueError('''Tangential Force cannot be negative''')
elif area < 0:
raise ValueError('''Area cannot be negative''')
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
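

# Illustrative usage, not part of the original snippet: with a tangential force
# of 100 N over an area of 20 m^2 (stress passed as 0 to mark it unknown), the
# function solves for stress = 100 / 20 = 5 Pa.
assert shear_stress(stress=0, tangential_force=100, area=20) == ("stress", 5.0)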
if __name__ == "__main__":
import doctest
doctest.testmod()
| 101
|
"""simple docstring"""
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """
    Returns the prime numbers below max_number, using a sieve of Eratosthenes.
    """
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False

    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """
    Counts the hybrid-integers p^q * q^p (p, q distinct primes) that are
    less than or equal to base^degree, working in log2 space.
    """
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
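

# Illustrative check of the logarithmic identity used above, not part of the
# original file: for p = 2, q = 3, p**q * q**p = 72 and
# q*log2(p) + p*log2(q) = log2(72), so comparing in log space is exact.
from math import isclose

assert isclose(3 * log2(2) + 2 * log2(3), log2(2**3 * 3**2))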
if __name__ == "__main__":
print(F'{solution() = }')
| 101
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 456
|
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            mask_token=mask_token,
            cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
| 456
| 1
|
class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        """Inserts a list of words into the Trie."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Inserts a single word into the Trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        """Returns True if the word is in the Trie, False otherwise."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        """Deletes a word from the Trie, pruning nodes that become unused."""

        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    """Prints all the words stored in a Trie, one branch at a time."""
    if node.is_leaf:
        print(word, end=" ")

    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
| 718
|
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)

        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            successors = self.get_successors(current_node)

            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Returns the list of reachable, in-bounds neighbour nodes."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Retraces the path from parent to parent until the start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
__lowerCamelCase : str = (0, 0)
__lowerCamelCase : List[str] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__lowerCamelCase : Any = time.time()
__lowerCamelCase : Optional[Any] = BreadthFirstSearch(init, goal)
__lowerCamelCase : str = bfs.search()
__lowerCamelCase : Optional[Any] = time.time() - start_bfs_time
print("""Unidirectional BFS computation time : """, bfs_time)
__lowerCamelCase : Optional[Any] = time.time()
__lowerCamelCase : Optional[int] = BidirectionalBreadthFirstSearch(init, goal)
__lowerCamelCase : str = bd_bfs.search()
__lowerCamelCase : Optional[Any] = time.time() - start_bd_bfs_time
print("""Bidirectional BFS computation time : """, bd_bfs_time)
'''simple docstring'''
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """
    Recursive backtracking: count the unique paths from (row, col) to the
    bottom-right corner of ``grid``, moving up/down/left/right, never revisiting
    a cell and never stepping on an obstacle (cells equal to 1).
    """
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
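
# A small worked example (hypothetical 2x2 grid with no obstacles): only the
# two L-shaped routes reach the bottom-right corner without revisits, so the
# count is 2.
#
#   depth_first_search([[0, 0], [0, 0]], 0, 0, set())  # -> 2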
'''simple docstring'''
import random
def partition(a: list, left_index: int, right_index: int) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
if __name__ == "__main__":
main()
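
# Quick sanity check (hypothetical data) showing the in-place contract of
# quick_sort_random -- it returns None and mutates the list:
#
#   data = [5, 2, 9, 1]
#   quick_sort_random(data, 0, len(data))
#   assert data == [1, 2, 5, 9]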
"""simple docstring"""
import operator as op
def solve(post_fix):
    """Evaluate a postfix (reverse Polish) expression given as a list of tokens."""
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ",
            )
    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
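
# A minimal instantiation sketch (hypothetical sizes; CLIPVisionConfig is the
# usual config class for CLIPVisionModel, so a tiny one keeps the example cheap):
#
#   from transformers import CLIPVisionConfig
#   config = CLIPVisionConfig(hidden_size=64, num_hidden_layers=5, num_attention_heads=4, intermediate_size=128)
#   encoder = PaintByExampleImageEncoder(config, proj_size=32)
#   latents = encoder(torch.randn(1, 3, config.image_size, config.image_size))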
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.layers.{i}.downsample.reduction.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", f"""mask_embedder.{i}.0.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", f"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img() -> torch.Tensor:
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")

    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="maskformer-swin-tiny-ade",
        type=str,
        help="Name of the MaskFormer model you'd like to convert",
    )
    parser.add_argument(
        "--checkpoint_path",
        default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
        type=str,
        help="Path to the original state dict (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_maskformer_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
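
# Example invocation (hypothetical script filename and local paths):
#
#   python convert_maskformer_swin_to_pytorch.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade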
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
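
# Worked example (hypothetical tree): the root holds 3 coins and both leaves
# hold 0, so one coin must cross each edge and the answer is 2 moves:
#
#   tree = TreeNode(3, TreeNode(0), TreeNode(0))
#   assert distribute_coins(tree) == 2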
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
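
# Instantiation sketch: the defaults above describe the LeViT-128S layout, so a
# hypothetical narrower variant only needs to override the per-stage lists:
#
#   config = LevitConfig(hidden_sizes=[64, 128, 192], num_attention_heads=[2, 4, 6])
#   assert config.model_type == "levit" and len(config.depths) == 3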
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}


class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout


class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The input order differs between the question-answering/sequence-classification
        # heads and the other tasks.
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # A dummy image is provided explicitly, so OCR must not run
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
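
# Sketch of how the ONNX export machinery would consume the classes above
# (hypothetical `processor` object; LayoutLMv3Processor is the matching
# processor in this library):
#
#   onnx_config = LayoutLMv3OnnxConfig(LayoutLMv3Config(), task="question-answering")
#   dummy = onnx_config.generate_dummy_inputs(processor, batch_size=2, seq_length=8)
#   # dummy then contains input_ids / attention_mask / bbox / pixel_values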
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions, references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
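
# Typical invocation (hypothetical DeepSpeed config path); the script expects to
# run under the accelerate launcher so that accelerator.state.deepspeed_plugin
# is populated:
#
#   accelerate launch --config_file deepspeed_zero2.yaml this_script.py \
#       --model_name_or_path bert-base-cased --num_epochs 2 --output_dir .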
"""simple docstring"""
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])

    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"

    test_config_path = Path("tests/test_configs")

    @classmethod
    def setUpClass(cls):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())


class TpuConfigTester(unittest.TestCase):
    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"

    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all',
            output,
        )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all',
            output,
        )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all',
            output,
        )

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                'echo "Hello World"',
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all',
            output,
        )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
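
# Examples of the predicate above:
#
#   assert is_balanced("([]{})")
#   assert not is_balanced("(]")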
'''simple docstring'''
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2


class Dictionary:
    """A mapping from symbols to consecutive integers (fairseq-style)."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    # (1) strip the word-breaking suffix and (2) add a word-ending marker where the word is whole
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict[layer_name.replace("decoder.", "")] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--biogpt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_lowerCamelCase = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
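# A minimal sketch of consuming the converted dump with the public
# transformers API (the folder path is a placeholder for whatever
# --pytorch_dump_folder_path pointed at):
#
#     from transformers import BioGptForCausalLM, BioGptTokenizer
#
#     tokenizer = BioGptTokenizer.from_pretrained("path/to/pytorch_dump_folder")
#     model = BioGptForCausalLM.from_pretrained("path/to/pytorch_dump_folder")
#     inputs = tokenizer("COVID-19 is", return_tensors="pt")
#     outputs = model.generate(**inputs, max_new_tokens=20)
#     print(tokenizer.decode(outputs[0], skip_special_tokens=True))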
| 71
|
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTV2Config
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation, MobileViTV2Model
    from transformers.models.mobilevitv2.modeling_mobilevitv2 import (
        MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
        make_divisible,
    )
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTV2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout,
            attn_dropout=self.attn_dropout,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTV2Model, MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTV2Model,
            "image-classification": MobileViTV2ForImageClassification,
            "image-segmentation": MobileViTV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTV2ModelTester(self)
        self.config_tester = MobileViTV2ConfigTester(self, config_class=MobileViTV2Config, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
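if __name__ == "__main__":
    # Minimal inference sketch mirroring the integration test above; the
    # checkpoint is the one used in the tests, the image is the COCO fixture.
    processor = MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
    model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
    image = prepare_img()
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    print(model.config.id2label[int(logits.argmax(-1))])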
| 453
| 0
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 107
|
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")
        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
SCREAMING_SNAKE_CASE__: Dict= [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
SCREAMING_SNAKE_CASE__: List[Any]= [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 
788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , lowerCAmelCase )
self.assertListEqual(encoding.boxes , lowerCAmelCase )
        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
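if __name__ == "__main__":
    # Quick OCR sketch mirroring the integration test above; requires
    # pytesseract and network access for the dataset fixture.
    from datasets import load_dataset

    processor = LayoutLMv3ImageProcessor()  # apply_ocr=True by default
    ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")
    document = Image.open(ds[0]["file"]).convert("RGB")
    encoding = processor(document, return_tensors="pt")
    # 224x224 pixel_values plus the OCR'd words and normalized bounding boxes
    print(encoding.pixel_values.shape, len(encoding.words[0]), len(encoding.boxes[0]))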
| 107
| 1
|
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
    'kernels/rwkv/wkv_cuda.cu',
    'kernels/rwkv/wkv_op.cpp',
    'kernels/deformable_detr/ms_deform_attn.h',
    'kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh',
    'models/graphormer/algos_graphormer.pyx',
]


def test_custom_files_are_present(transformers_path) -> bool:
    '''Checks whether each custom kernel/extension file ships with the package.'''
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True
if __name__ == "__main__":
a_ :List[Any] = argparse.ArgumentParser()
parser.add_argument('--check_lib', action='store_true', help='Whether to check the build or the actual package.')
a_ :Dict = parser.parse_args()
if args.check_lib:
        transformers_module = importlib.import_module('transformers')
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / 'build/lib/transformers'
if not test_custom_files_are_present(transformers_path):
raise ValueError('The built release does not contain the custom files. Fix this before going further!')
| 35
|
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern
    # value for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
| 333
| 0
|
import numpy as np
import torch
import tqdm

from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler


class ValueGuidedRLPipeline(DiffusionPipeline):
    def __init__(
        self,
        value_function: UNet1DModel,
        unet: UNet1DModel,
        scheduler: DDPMScheduler,
        env,
    ):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad

                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)

            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
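# A rough usage sketch, modeled on the run_diffuser_locomotion example in
# diffusers; the environment name, hub checkpoint and control loop below are
# assumptions taken from that example, not part of this file:
#
#     import d4rl  # noqa: F401  # registers the offline-RL gym environments
#     import gym
#
#     env = gym.make("hopper-medium-v2")
#     pipeline = ValueGuidedRLPipeline.from_pretrained(
#         "bglick13/hopper-medium-v2-value-function-hor32", env=env
#     )
#     obs = env.reset()
#     for _ in range(10):
#         denorm_actions = pipeline(obs, planning_horizon=32)
#         obs, reward, terminated, info = env.step(denorm_actions)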
| 547
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
__a = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=__lowercase )
__a = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__a = sd_pipe.to(__lowercase )
sd_pipe.set_progress_bar_config(disable=__lowercase )
__a = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
__a = 4003660346
__a = 7
# without safety guidance (sld_guidance_scale = 0)
__a = torch.manual_seed(__lowercase )
__a = sd_pipe(
[prompt] , generator=__lowercase , guidance_scale=__lowercase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
__a = output.images
__a = image[0, -3:, -3:, -1]
__a = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# without safety guidance (strong configuration)
__a = torch.manual_seed(__lowercase )
__a = sd_pipe(
[prompt] , generator=__lowercase , guidance_scale=__lowercase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__a = output.images
__a = image[0, -3:, -3:, -1]
__a = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
__a = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=__lowercase )
__a = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__a = sd_pipe.to(__lowercase )
sd_pipe.set_progress_bar_config(disable=__lowercase )
__a = """padme amidala taking a bath artwork, safe for work, no nudity"""
__a = 2734971755
__a = 7
__a = torch.manual_seed(__lowercase )
__a = sd_pipe(
[prompt] , generator=__lowercase , guidance_scale=__lowercase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
__a = output.images
__a = image[0, -3:, -3:, -1]
__a = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
__a = torch.manual_seed(__lowercase )
__a = sd_pipe(
[prompt] , generator=__lowercase , guidance_scale=__lowercase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__a = output.images
__a = image[0, -3:, -3:, -1]
__a = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
__a = sd_pipe.to(__lowercase )
sd_pipe.set_progress_bar_config(disable=__lowercase )
__a = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
__a = 1044355234
__a = 12
__a = torch.manual_seed(__lowercase )
__a = sd_pipe(
[prompt] , generator=__lowercase , guidance_scale=__lowercase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
__a = output.images
__a = image[0, -3:, -3:, -1]
__a = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
__a = torch.manual_seed(__lowercase )
__a = sd_pipe(
[prompt] , generator=__lowercase , guidance_scale=__lowercase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__a = output.images
__a = image[0, -3:, -3:, -1]
__a = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
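if __name__ == "__main__":
    # Minimal Safe Latent Diffusion sketch using the same sld_* knobs the
    # tests above exercise; needs a CUDA GPU and the runwayml checkpoint.
    # (StableDiffusionPipeline is aliased to StableDiffusionPipelineSafe in
    # this module's imports.)
    pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
    pipe = pipe.to("cuda")
    image = pipe(
        "portrait photo of an astronaut",  # illustrative prompt
        guidance_scale=7.0,
        sld_guidance_scale=2000,
        sld_warmup_steps=7,
        sld_threshold=0.025,
        sld_momentum_scale=0.5,
        sld_mom_beta=0.7,
    ).images[0]
    image.save("safe_sd_out.png")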
| 547
| 1
|
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: "Sunday",
1: "Monday",
2: "Tuesday",
3: "Wednesday",
4: "Thursday",
5: "Friday",
6: "Saturday",
}
def get_week_day(year: int, month: int, day: int) -> str:
    """Returns the week-day name of a given date.

    >>> get_week_day(2020, 10, 24)
    'Saturday'
    >>> get_week_day(2017, 10, 24)
    'Tuesday'
    """
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
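    # Worked example for 2020-10-24 (arithmetic checked against the tables above):
    #   century        = 2020 // 100                -> 20
    #   century_anchor = (5 * (20 % 4) + 2) % 7     -> 2
    #   centurian      = 20, centurian_m = 20 % 12  -> 8
    #   dooms_day      = (1 + 8 + 2 + 2) % 7        -> 6
    #   2020 is leap, so October's anchor is DOOMSDAY_LEAP[9] = 3
    #   week_day       = (6 + 24 - 3) % 7           -> 6
    print(get_week_day(2020, 10, 24))  # Saturday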
| 32
|
import numpy as np
def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """Applies the ELU activation function element-wise."""
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))
if __name__ == "__main__":
import doctest
doctest.testmod()
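    # Quick numeric check: negatives are squashed towards -alpha, zero maps to
    # zero, positives pass through unchanged.
    print(exponential_linear_unit(np.array([-2.0, 0.0, 3.0]), 1.0))
    # [-0.86466472  0.          3.        ]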
| 32
| 1
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
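# A minimal text-to-image sketch using the two-stage API exposed here; the
# checkpoint names are the public kandinsky-community ones, and the exact call
# pattern is an assumption based on the diffusers docs, not an excerpt from
# this file:
#
#     from diffusers import KandinskyPipeline, KandinskyPriorPipeline
#
#     prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
#     image_embeds, negative_image_embeds = prior("a portrait of a corgi, 4k photo").to_tuple()
#     pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
#     image = pipe(
#         "a portrait of a corgi, 4k photo",
#         image_embeds=image_embeds,
#         negative_image_embeds=negative_image_embeds,
#     ).images[0]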
| 310
|
"""simple docstring"""
import os
def solution():
    """Finds the greatest product of four adjacent numbers (horizontally,
    vertically, or diagonally) in the 20x20 grid stored in grid.txt."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])
    maximum = 0
    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp
    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp
    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp
    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
| 310
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
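# A minimal usage sketch (assumes timm is installed; "resnet50" is an
# illustrative timm model name, not something this config prescribes):
#
#     from transformers import TimmBackbone
#
#     config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
#     model = TimmBackbone(config)  # weights come from timm when use_pretrained_backbone=True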
| 71
|
"""simple docstring"""
def solution(n: int = 600851475143) -> int:
    '''Returns the largest prime factor of a given number n.

    >>> solution(13195)
    29
    '''
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('Parameter n must be int or castable to int.')
    if n <= 0:
        raise ValueError('Parameter n must be greater than or equal to one.')
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
print(F'''{solution() = }''')
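    # Worked example: 13195 = 5 * 7 * 13 * 29, so the largest prime factor is 29.
    print(F'''{solution(13195) = }''')  # solution(13195) = 29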
| 232
| 0
|
'''simple docstring'''
import torch
from transformers import AutoModel
class UpperCAmelCase (torch.nn.Module):
    """FSNER embedding model: scores the start/end tokens of a query against support examples."""

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)
        p_starts = None
        p_ends = None
        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id
        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
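
A usage sketch for the model above. The support-side packing (the entity markers and the sizes/start_token_id/end_token_id entries) is assumed here purely for illustration; in practice it comes from FSNER's own tokenizer utilities, which this snippet does not include.

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("sayef/fsner-bert-base-uncased")
model = UpperCAmelCase()
query = tokenizer(["who founded hugging face?"], return_tensors="pt", padding=True)
supports = tokenizer(["[E] clement [/E] co-founded the company"], return_tensors="pt", padding=True)
supports["sizes"] = torch.tensor([1])  # one support example for the single query
supports["start_token_id"] = torch.tensor(tokenizer.convert_tokens_to_ids("[E]"))
supports["end_token_id"] = torch.tensor(tokenizer.convert_tokens_to_ids("[/E]"))
p_starts, p_ends = model(query, supports)  # per-token start/end probabilities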
| 708
|
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)
            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitva_config(task_name, orig_cfg_file):
    config = MobileViTVaConfig()
    is_segmentation_model = False
    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True
    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)
    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."
    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k
        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")
        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")
        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")
        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]
            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.")
                if f"layer_{i}.1.global_rep.{j+1}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm.")
            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")
        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")
        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")
        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")
        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    """Remove unused keys (e.g. the auxiliary segmentation head)."""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitva_config(task_name, orig_config_path)
    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTVaForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config).eval()
        base_model = False
    # remove and rename some keys of the original model before loading
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)
    # load modified state_dict
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
A_ = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
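
A tiny illustration of the renaming mechanics defined above, run on a fake one-key state dict; the key is made up and far shorter than a real MobileViTV2 checkpoint.

fake_state_dict = {"encoder.conv_1.block.conv.weight": torch.zeros(1)}
for src, dest in create_rename_keys(fake_state_dict, base_model=False):
    rename_key(fake_state_dict, src, dest)
print(list(fake_state_dict))  # ['mobilevitv2.conv_stem.convolution.weight']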
| 384
| 0
|
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 238
|
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 238
| 1
|
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def bamb(x):
    """Convert bytes to megabytes."""
    return int(x / 2**20)


class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased", n_train: int = 320, n_val: int = 160):
    """Create train/eval DataLoaders for the GLUE MRPC dataset."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"})

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
def training_function(config, args):
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps)
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1
        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + bamb(tracemalloc.begin)))
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument("--n_train", type=int, default=320, help="Number of training examples to use.")
    parser.add_argument("--n_val", type=int, default=160, help="Number of validation examples to use.")
    parser.add_argument("--num_epochs", type=int, default=1, help="Number of train epochs.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
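
The TorchTracemalloc context manager above is reusable on its own. A minimal sketch, assuming a CUDA device is available; the tensor sizes are illustrative and the reported numbers are machine-dependent.

with TorchTracemalloc() as tm:
    x = torch.randn(1024, 1024, device="cuda")
    y = x @ x  # allocate some GPU memory inside the traced region
print(f"used {tm.used} MB, peaked {tm.peaked} MB")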
| 380
|
def topological_sort(graph):
    """Kahn's algorithm: print a topological order of graph, or report a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
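
With a cycle, cnt never reaches len(graph), so the function reports it instead of printing an order:

cyclic = {0: [1], 1: [2], 2: [0]}
topological_sort(cyclic)  # prints "Cycle exists"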
| 380
| 1
|
'''simple docstring'''
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    """Deterministic Miller-Rabin primality test for n below ~3.32e24, with an
    optional probabilistic mode above that bound."""
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime.")
    # array bounds provided by analysis
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True


def test_miller_rabin() -> None:
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838_201 )
assert miller_rabin(838_207 )
# 1_373_653
assert not miller_rabin(17_316_001 )
assert miller_rabin(17_316_017 )
# 25_326_001
assert not miller_rabin(3_078_386_641 )
assert miller_rabin(3_078_386_653 )
# 3_215_031_751
assert not miller_rabin(1_713_045_574_801 )
assert miller_rabin(1_713_045_574_819 )
# 2_152_302_898_747
assert not miller_rabin(2_779_799_728_307 )
assert miller_rabin(2_779_799_728_327 )
# 3_474_749_660_383
assert not miller_rabin(113_850_023_909_441 )
assert miller_rabin(113_850_023_909_527 )
# 341_550_071_728_321
assert not miller_rabin(1_275_041_018_848_804_351 )
assert miller_rabin(1_275_041_018_848_804_391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79_666_464_458_507_787_791_867 )
assert miller_rabin(79_666_464_458_507_787_791_951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552_840_677_446_647_897_660_333 )
assert miller_rabin(552_840_677_446_647_897_660_359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
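
For reference, the d * 2**s decomposition computed by the loop above can be checked by hand; n = 561 (the first Carmichael number, also used in the tests) serves purely as an example.

n = 561
d, s = n - 1, 0
while d % 2 == 0:
    d //= 2
    s += 1
assert (d, s) == (35, 4) and d * 2**s == n - 1  # 560 = 35 * 2**4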
| 508
|
'''simple docstring'''
def solution(limit: int = 1_000_000):
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
if __name__ == "__main__":
print(f"""{solution() = }""")
| 508
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__lowercase :Tuple = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :List[str] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :List[str] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :List[Any] = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
__lowercase :Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
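
What the _LazyModule indirection above buys is deferred imports: the heavy submodules are only imported when an attribute is first accessed, so importing the package itself stays cheap. A hedged sketch; the package path assumes the stock transformers layout.

import transformers.models.fnet as fnet

config = fnet.FNetConfig()  # first attribute access triggers the real submodule import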
| 26
|
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__lowercase :List[Any] = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
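
A sketch of how the helper above might be called; the package name and hint string are illustrative.

dep_version_check("tqdm", hint="try: pip install -U tqdm")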
| 26
| 1
|
import pickle
import numpy as np
from matplotlib import pyplot as plt
class A__ :
"""simple docstring"""
def __init__( self : str , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : int , lowerCamelCase__ : Any , lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : int=0.2 , lowerCamelCase__ : Dict=0.2 ):
a__ : Optional[int] = bp_numa
a__ : Tuple = bp_numa
a__ : Optional[Any] = bp_numa
a__ : str = conva_get[:2]
a__ : Optional[int] = conva_get[2]
a__ : Any = size_pa
a__ : List[str] = rate_w
a__ : str = rate_t
a__ : Optional[int] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
a__ : Optional[int] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
a__ : Any = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
a__ : List[str] = -2 * np.random.rand(self.conva[1] ) + 1
a__ : List[str] = -2 * np.random.rand(self.num_bpa ) + 1
a__ : List[Any] = -2 * np.random.rand(self.num_bpa ) + 1
def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : Optional[Any] ):
# save model dict with pickle
a__ : int = {
"num_bp1": self.num_bpa,
"num_bp2": self.num_bpa,
"num_bp3": self.num_bpa,
"conv1": self.conva,
"step_conv1": self.step_conva,
"size_pooling1": self.size_poolinga,
"rate_weight": self.rate_weight,
"rate_thre": self.rate_thre,
"w_conv1": self.w_conva,
"wkj": self.wkj,
"vji": self.vji,
"thre_conv1": self.thre_conva,
"thre_bp2": self.thre_bpa,
"thre_bp3": self.thre_bpa,
}
with open(lowerCamelCase__ , "wb" ) as f:
pickle.dump(lowerCamelCase__ , lowerCamelCase__ )
print(f'''Model saved: {save_path}''' )
@classmethod
def _UpperCamelCase( cls : Tuple , lowerCamelCase__ : Tuple ):
# read saved model
with open(lowerCamelCase__ , "rb" ) as f:
a__ : Tuple = pickle.load(lowerCamelCase__ ) # noqa: S301
a__ : List[str] = model_dic.get("conv1" )
conv_get.append(model_dic.get("step_conv1" ) )
a__ : Optional[Any] = model_dic.get("size_pooling1" )
a__ : Optional[Any] = model_dic.get("num_bp1" )
a__ : Tuple = model_dic.get("num_bp2" )
a__ : int = model_dic.get("num_bp3" )
a__ : Tuple = model_dic.get("rate_weight" )
a__ : Optional[Any] = model_dic.get("rate_thre" )
# create model instance
a__ : Tuple = CNN(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# modify model parameter
a__ : Tuple = model_dic.get("w_conv1" )
a__ : int = model_dic.get("wkj" )
a__ : List[str] = model_dic.get("vji" )
a__ : Optional[int] = model_dic.get("thre_conv1" )
a__ : Optional[int] = model_dic.get("thre_bp2" )
a__ : Optional[int] = model_dic.get("thre_bp3" )
return conv_ins
def _UpperCamelCase( self : Any , lowerCamelCase__ : List[str] ):
return 1 / (1 + np.exp(-1 * x ))
def _UpperCamelCase( self : Any , lowerCamelCase__ : List[str] ):
return round(lowerCamelCase__ , 3 )
def _UpperCamelCase( self : Tuple , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Any ):
# convolution process
a__ : Any = convs[0]
a__ : str = convs[1]
a__ : int = np.shape(lowerCamelCase__ )[0]
# get the data slice of original image data, data_focus
a__ : Optional[int] = []
for i_focus in range(0 , size_data - size_conv + 1 , lowerCamelCase__ ):
for j_focus in range(0 , size_data - size_conv + 1 , lowerCamelCase__ ):
a__ : List[str] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(lowerCamelCase__ )
# calculate the feature map of every single kernel, and saved as list of matrix
a__ : str = []
a__ : Dict = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(lowerCamelCase__ ):
a__ : Optional[Any] = []
for i_focus in range(len(lowerCamelCase__ ) ):
a__ : Tuple = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(lowerCamelCase__ ) )
a__ : Optional[int] = np.asmatrix(lowerCamelCase__ ).reshape(
lowerCamelCase__ , lowerCamelCase__ )
data_featuremap.append(lowerCamelCase__ )
# expanding the data slice to One dimenssion
a__ : Union[str, Any] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(lowerCamelCase__ ) )
a__ : Union[str, Any] = np.asarray(lowerCamelCase__ )
return focus_list, data_featuremap
def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[int]="average_pool" ):
# pooling process
a__ : Union[str, Any] = len(featuremaps[0] )
a__ : Union[str, Any] = int(size_map / size_pooling )
a__ : List[str] = []
for i_map in range(len(lowerCamelCase__ ) ):
a__ : str = featuremaps[i_map]
a__ : Tuple = []
for i_focus in range(0 , lowerCamelCase__ , lowerCamelCase__ ):
for j_focus in range(0 , lowerCamelCase__ , lowerCamelCase__ ):
a__ : Any = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(lowerCamelCase__ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(lowerCamelCase__ ) )
a__ : List[str] = np.asmatrix(lowerCamelCase__ ).reshape(lowerCamelCase__ , lowerCamelCase__ )
featuremap_pooled.append(lowerCamelCase__ )
return featuremap_pooled
def _UpperCamelCase( self : Any , lowerCamelCase__ : str ):
# expanding three dimension data to one dimension list
a__ : Optional[int] = []
for i in range(len(lowerCamelCase__ ) ):
a__ : Dict = np.shape(data[i] )
a__ : Union[str, Any] = data[i].reshape(1 , shapes[0] * shapes[1] )
a__ : List[Any] = data_listed.getA().tolist()[0]
data_expanded.extend(lowerCamelCase__ )
a__ : Dict = np.asarray(lowerCamelCase__ )
return data_expanded
def _UpperCamelCase( self : List[str] , lowerCamelCase__ : Tuple ):
# expanding matrix to one dimension list
a__ : str = np.asarray(lowerCamelCase__ )
a__ : Optional[int] = np.shape(lowerCamelCase__ )
a__ : Union[str, Any] = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def _UpperCamelCase( self : Any , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : str , lowerCamelCase__ : Any ):
a__ : int = []
a__ : int = 0
for i_map in range(lowerCamelCase__ ):
a__ : List[Any] = np.ones((size_map, size_map) )
for i in range(0 , lowerCamelCase__ , lowerCamelCase__ ):
for j in range(0 , lowerCamelCase__ , lowerCamelCase__ ):
a__ : Union[str, Any] = pd_pool[
i_pool
]
a__ : str = i_pool + 1
a__ : Any = np.multiply(
lowerCamelCase__ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(lowerCamelCase__ )
return pd_all
def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[str]=bool ):
# model traning
print("----------------------Start Training-------------------------" )
print((" - - Shape: Train_Data ", np.shape(lowerCamelCase__ )) )
print((" - - Shape: Teach_Data ", np.shape(lowerCamelCase__ )) )
a__ : str = 0
a__ : List[str] = []
a__ : int = 10_000
while rp < n_repeat and mse >= error_accuracy:
a__ : Optional[int] = 0
print(f'''-------------Learning Time {rp}--------------''' )
for p in range(len(lowerCamelCase__ ) ):
# print('------------Learning Image: %d--------------'%p)
a__ : Optional[Any] = np.asmatrix(datas_train[p] )
a__ : str = np.asarray(datas_teach[p] )
a__, a__ : Dict = self.convolute(
lowerCamelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
a__ : int = self.pooling(lowerCamelCase__ , self.size_poolinga )
a__ : Dict = np.shape(lowerCamelCase__ )
a__ : List[str] = self._expand(lowerCamelCase__ )
a__ : Dict = data_bp_input
a__ : Dict = np.dot(lowerCamelCase__ , self.vji.T ) - self.thre_bpa
a__ : Optional[int] = self.sig(lowerCamelCase__ )
a__ : int = np.dot(lowerCamelCase__ , self.wkj.T ) - self.thre_bpa
a__ : int = self.sig(lowerCamelCase__ )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
a__ : Tuple = np.multiply(
(data_teach - bp_outa) , np.multiply(lowerCamelCase__ , (1 - bp_outa) ) )
a__ : Optional[Any] = np.multiply(
np.dot(lowerCamelCase__ , self.wkj ) , np.multiply(lowerCamelCase__ , (1 - bp_outa) ) )
a__ : Optional[Any] = np.dot(lowerCamelCase__ , self.vji )
a__ : str = pd_i_all / (self.size_poolinga * self.size_poolinga)
a__ : Optional[Any] = pd_conva_pooled.T.getA().tolist()
a__ : Dict = self._calculate_gradient_from_pool(
lowerCamelCase__ , lowerCamelCase__ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
a__ : List[str] = self._expand_mat(pd_conva_all[k_conv] )
a__ : Any = self.rate_weight * np.dot(lowerCamelCase__ , lowerCamelCase__ )
a__ : List[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
a__ : Any = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
a__ : Optional[Any] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
a__ : str = self.vji + pd_j_all.T * bp_outa * self.rate_weight
a__ : Optional[int] = self.thre_bpa - pd_k_all * self.rate_thre
a__ : List[str] = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
a__ : str = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
a__ : str = rp + 1
a__ : Tuple = error_count / patterns
all_mse.append(lowerCamelCase__ )
def draw_error():
a__ : Optional[int] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(lowerCamelCase__ , "+-" )
plt.plot(lowerCamelCase__ , "r--" )
plt.xlabel("Learning Times" )
plt.ylabel("All_mse" )
plt.grid(lowerCamelCase__ , alpha=0.5 )
plt.show()
print("------------------Training Complished---------------------" )
print((" - - Training epoch: ", rp, f''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def _UpperCamelCase( self : Tuple , lowerCamelCase__ : int ):
# model predict
a__ : Optional[int] = []
print("-------------------Start Testing-------------------------" )
print((" - - Shape: Test_Data ", np.shape(lowerCamelCase__ )) )
for p in range(len(lowerCamelCase__ ) ):
a__ : str = np.asmatrix(datas_test[p] )
a__, a__ : Tuple = self.convolute(
lowerCamelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
a__ : Optional[int] = self.pooling(lowerCamelCase__ , self.size_poolinga )
a__ : Any = self._expand(lowerCamelCase__ )
a__ : Any = data_bp_input
a__ : str = bp_outa * self.vji.T - self.thre_bpa
a__ : Tuple = self.sig(lowerCamelCase__ )
a__ : Any = bp_outa * self.wkj.T - self.thre_bpa
a__ : List[str] = self.sig(lowerCamelCase__ )
produce_out.extend(bp_outa.getA().tolist() )
a__ : List[str] = [list(map(self.do_round , lowerCamelCase__ ) ) for each in produce_out]
return np.asarray(lowerCamelCase__ )
def _UpperCamelCase( self : Tuple , lowerCamelCase__ : Any ):
# return the data of image after convoluting process so we can check it out
a__ : List[Any] = np.asmatrix(lowerCamelCase__ )
a__, a__ : List[Any] = self.convolute(
lowerCamelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
a__ : Any = self.pooling(lowerCamelCase__ , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 37
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {}
class a_ (PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}")
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
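
Under the validation above, a well-formed rope_scaling dict needs exactly the `type` and `factor` fields; a minimal construction sketch, with an illustrative factor value:

config = a_(rope_scaling={"type": "linear", "factor": 2.0})
assert config.rope_scaling == {"type": "linear", "factor": 2.0}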
| 384
| 0
|
def UpperCamelCase(txt: str) -> list:
    """Return all variants of txt with exactly one alphabetic character upper-cased."""
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__("doctest").testmod()
| 719
|
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
UpperCAmelCase_ : int = pytest.mark.integration
@require_faiss
class UpperCamelCase ( _UpperCAmelCase ):
    def _create_dummy_dataset(self):
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset
def __A ( self ):
import faiss
A__ = self._create_dummy_dataset()
A__ = dset.map(
lambda UpperCAmelCase__ , UpperCAmelCase__ : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=UpperCAmelCase__ , keep_in_memory=UpperCAmelCase__ )
A__ = dset.add_faiss_index("vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
A__ , A__ = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
dset.drop_index("vecs" )
def __A ( self ):
import faiss
A__ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
A__ , A__ = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def __A ( self ):
import faiss
A__ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=UpperCAmelCase__ ) as tmp_file:
dset.save_faiss_index("vecs" , tmp_file.name )
dset.load_faiss_index("vecs2" , tmp_file.name )
os.unlink(tmp_file.name )
A__ , A__ = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def __A ( self ):
A__ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
dset.drop_index("vecs" )
self.assertRaises(UpperCAmelCase__ , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) )
def __A ( self ):
from elasticsearch import Elasticsearch
A__ = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
A__ = {"acknowledged": True}
mocked_bulk.return_value([(True, None)] * 30 )
A__ = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
A__ = Elasticsearch()
dset.add_elasticsearch_index("filename" , es_client=UpperCAmelCase__ )
A__ , A__ = dset.get_nearest_examples("filename" , "my_name-train_29" )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
@require_faiss
class UpperCamelCase ( _UpperCAmelCase ):
def __A ( self ):
import faiss
A__ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
A__ = np.zeros(5 , dtype=np.floataa )
A__ = 1
A__ , A__ = index.search(UpperCAmelCase__ )
self.assertRaises(UpperCAmelCase__ , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
A__ = np.eye(5 , dtype=np.floataa )[::-1]
A__ , A__ = index.search_batch(UpperCAmelCase__ )
self.assertRaises(UpperCAmelCase__ , index.search_batch , queries[0] )
A__ = [scores[0] for scores in total_scores]
A__ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(UpperCAmelCase__ ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , UpperCAmelCase__ )
def __A ( self ):
import faiss
A__ = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
A__ = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(UpperCAmelCase__ ):
A__ = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )
def __A ( self ):
import faiss
A__ = faiss.IndexFlat(5 )
A__ = FaissIndex(custom_index=UpperCAmelCase__ )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def __A ( self ):
import faiss
A__ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=UpperCAmelCase__ ) as tmp_file:
index.save(tmp_file.name )
A__ = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
A__ = np.zeros(5 , dtype=np.floataa )
A__ = 1
A__ , A__ = index.search(UpperCAmelCase__ )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def test_serialization_fs(mockfs):
    """Save and reload a FaissIndex through an fsspec filesystem."""
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))
    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)
    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class UpperCamelCase ( _UpperCAmelCase ):
def __A ( self ):
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
A__ = Elasticsearch()
A__ = {"acknowledged": True}
A__ = ElasticSearchIndex(es_client=UpperCAmelCase__ )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(["foo", "bar", "foobar"] )
# single query
A__ = "foo"
A__ = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
A__ , A__ = index.search(UpperCAmelCase__ )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
A__ = "foo"
A__ = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
A__ , A__ = index.search(UpperCAmelCase__ , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
A__ = ["foo", "bar", "foobar"]
A__ = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
A__ , A__ = index.search_batch(UpperCAmelCase__ )
A__ = [scores[0] for scores in total_scores]
A__ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(UpperCAmelCase__ ) , 0 )
self.assertListEqual([1, 1, 1] , UpperCAmelCase__ )
# batched queries with timeout
A__ = ["foo", "bar", "foobar"]
A__ = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
A__ , A__ = index.search_batch(UpperCAmelCase__ , request_timeout=30 )
A__ = [scores[0] for scores in total_scores]
A__ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(UpperCAmelCase__ ) , 0 )
self.assertListEqual([1, 1, 1] , UpperCAmelCase__ )
| 232
| 0
|
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy Beam-based dataset with flat features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}), supervised_keys=None)

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy Beam-based dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}), supervised_keys=None)

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase ):
'''simple docstring'''
@require_beam
    def test_download_and_prepare( self ):
        expected_num_examples = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir , beam_runner="DirectRunner" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , F'''{builder.name}-train.arrow''' ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , expected_num_examples )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , expected_num_examples )
            self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] )
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset
@require_beam
    def test_download_and_prepare_sharded( self ):
        import apache_beam as beam
        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir , beam_runner="DirectRunner" )
            with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet , num_shards=2 )
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir , builder.name , "default" , "0.0.0" , F'''{builder.name}-train-00000-of-00002.arrow''' ) ) )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir , builder.name , "default" , "0.0.0" , F'''{builder.name}-train-00001-of-00002.arrow''' ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , expected_num_examples )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , expected_num_examples )
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset
@require_beam
    def test_no_beam_options( self ):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
    def test_nested_features( self ):
        expected_num_examples = len(get_test_nested_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir , beam_runner="DirectRunner" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , F'''{builder.name}-train.arrow''' ) ) )
            self.assertDictEqual(
                builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , expected_num_examples )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , expected_num_examples )
            self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] )
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset
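# --- Added usage sketch (not in the original test file) ----------------------
# A minimal, hedged example of driving one of the Beam builders above end to
# end; "DirectRunner" is Beam's local runner and requires `apache_beam` to be
# installed. The helper name is ours, not part of the original tests.
def _example_run_dummy_beam_builder():
    with tempfile.TemporaryDirectory() as tmp_cache_dir:
        builder = DummyBeamDataset(cache_dir=tmp_cache_dir , beam_runner="DirectRunner" )
        builder.download_and_prepare()  # runs the pipeline and writes Arrow files
        dset = builder.as_dataset()
        return dset["train"].num_rows  # 3 examples: foo, bar, foobar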
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(encoder_config , decoder_config ):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''encoder.deit.blocks.{i}.norm1.weight''', f'''encoder.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''encoder.deit.blocks.{i}.norm1.bias''', f'''encoder.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.attn.proj.weight''', f'''encoder.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.attn.proj.bias''', f'''encoder.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.norm2.weight''', f'''encoder.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''encoder.deit.blocks.{i}.norm2.bias''', f'''encoder.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.mlp.fc1.weight''', f'''encoder.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.mlp.fc1.bias''', f'''encoder.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.mlp.fc2.weight''', f'''encoder.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''encoder.deit.blocks.{i}.mlp.fc2.bias''', f'''encoder.encoder.layer.{i}.output.dense.bias''') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
("encoder.deit.norm.weight", "encoder.layernorm.weight"),
("encoder.deit.norm.bias", "encoder.layernorm.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict , encoder_config ):
    for i in range(encoder_config.num_hidden_layers ):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f'''encoder.deit.blocks.{i}.attn.qkv.weight''' )
        state_dict[f'''encoder.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f'''encoder.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f'''encoder.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
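# --- Added illustration (not in the original script) -------------------------
# Hedged sketch of the slicing above: a fused qkv projection of shape
# (3 * hidden, hidden) splits into equal query/key/value blocks along dim 0.
# Sizes are illustrative, not the real ViT dimensions.
def _example_split_fused_qkv():
    hidden = 4
    qkv = torch.randn(3 * hidden , hidden )
    q, k, v = qkv[:hidden, :], qkv[hidden : 2 * hidden, :], qkv[-hidden:, :]
    assert q.shape == k.shape == v.shape == (hidden, hidden)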
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img(checkpoint_url ):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url , stream=True ).raw ).convert("RGB" )
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url , pytorch_dump_folder_path ):
    """
    Copy/paste/tweak the original checkpoint's weights into our VisionEncoderDecoderModel structure.
    """
    # define encoder and decoder configs based on checkpoint_url
    encoder_config = ViTConfig(image_size=384 , qkv_bias=False )
    decoder_config = TrOCRConfig()
    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL" )
    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False
    # load HuggingFace model
    encoder = ViTModel(encoder_config , add_pooling_layer=False )
    decoder = TrOCRForCausalLM(decoder_config )
    model = VisionEncoderDecoderModel(encoder=encoder , decoder=decoder )
    model.eval()
    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" , check_hash=True )["model"]
    rename_keys = create_rename_keys(encoder_config , decoder_config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , encoder_config )
    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]
    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith("decoder" ) and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val
    # load state dict
    model.load_state_dict(state_dict )
    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size )
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large" )
    processor = TrOCRProcessor(image_processor , tokenizer )
    pixel_values = processor(images=prepare_img(checkpoint_url ) , return_tensors="pt" ).pixel_values
    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
    outputs = model(pixel_values=pixel_values , decoder_input_ids=decoder_input_ids )
    logits = outputs.logits
    expected_shape = torch.Size([1, 1, 50265] )
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] )
    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10] , expected_slice , atol=1e-3 ), "First elements of logits not as expected"
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving processor to {pytorch_dump_folder_path}''' )
    processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt',
type=str,
help='URL to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
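# --- Added usage note (not in the original script) ----------------------------
# A hedged example invocation; the script filename and output folder name are
# illustrative, not confirmed by the source:
#   python convert_trocr_checkpoint.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten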
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/pegasus-xsum': 512,
}
logger = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file , pad_token="<pad>" , eos_token="</s>" , unk_token="<unk>" , mask_token="<mask_2>" , mask_token_sent="<mask_1>" , additional_special_tokens=None , offset=103 , sp_model_kwargs = None , **kwargs , ) -> None:
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens , list ):
                raise TypeError(
                    F'''additional_special_tokens should be of type {type(list )}, but is'''
                    F''' {type(additional_special_tokens )}''' )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                F'''<unk_{i}>''' for i in range(len(additional_special_tokens_extended ) , self.offset - 1 )
            ]
            if len(set(additional_special_tokens_extended ) ) != len(additional_special_tokens_extended ):
                raise ValueError(
                    """Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
                    F''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [F'''<unk_{i}>''' for i in range(2 , self.offset )]
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token , unk_token=unk_token , mask_token=mask_token , pad_token=pad_token , mask_token_sent=mask_token_sent , offset=offset , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
# add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}
@property
    def vocab_size( self ) -> int:
        return len(self.sp_model ) + self.offset
    def get_vocab( self ) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
    def __getstate__( self ) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
return state
    def __setstate__( self , d ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _tokenize( self , text ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ) -> int:
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token )
        return sp_id + self.offset
    def _convert_id_to_token( self , index ) -> str:
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset )
        return token
    def convert_tokens_to_string( self , tokens ) -> str:
        current_sub_tokens = []
        out_string = """"""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def num_special_tokens_to_add( self , pair=False ) -> int:
        return 1
    def _special_token_mask( self , seq ) -> List[int]:
        all_special_ids = set(self.all_special_ids )  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0 )
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0 ) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1 ) + [1]
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
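# --- Added usage sketch (not in the original module) --------------------------
# Hedged illustration of the id layout built above: ids 0/1 are pad/eos, the
# next ids cover the mask and <unk_x> tokens, and SentencePiece pieces start at
# `offset` (103 by default). `tokenizer` is assumed to be a loaded instance,
# e.g. PegasusTokenizer.from_pretrained("google/pegasus-xsum").
def _example_pegasus_offset(tokenizer):
    first_sp_piece_id = tokenizer.offset  # sp_model id 0 maps to this id
    return tokenizer.convert_ids_to_tokens(first_sp_piece_id )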
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)
class MockDownloadManager:
"""simple docstring"""
    dummy_file_name = '''dummy_data'''
    datasets_scripts_dir = '''datasets'''
    is_streaming = False
    def __init__( self , dataset_name , config , version , cache_dir = None , use_local_dummy_data = False , load_existing_dummy_data = True , download_callbacks = None , ) -> None:
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version )
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file( self ):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file
    @property
    def dummy_data_folder( self ):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("""dummy""" , self.config.name , self.version_name )
        # structure is dummy / version_name
        return os.path.join("""dummy""" , self.version_name )
    @property
    def dummy_zip_file( self ):
        return os.path.join(self.dummy_data_folder , """dummy_data.zip""" )
    def download_dummy_data( self ):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir , cache_dir=self.cache_dir , extract_compressed_file=True , force_extract=True )
        return os.path.join(local_path , self.dummy_file_name )
    @property
    def local_path_to_dummy_data( self ):
        return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
    @property
    def github_path_to_dummy_data( self ):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , """/""" ) )
        return self._bucket_url
    @property
    def manual_dir( self ):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file ):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep , """/""" ).split("""/""" )[:-1] )
    def download_and_extract( self , data_url , *args ):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url , dict ):
            return self.create_dummy_data_dict(dummy_file , data_url )
        elif isinstance(data_url , (list, tuple) ):
            return self.create_dummy_data_list(dummy_file , data_url )
        else:
            return self.create_dummy_data_single(dummy_file , data_url )
    def download( self , data_url , *args ):
        return self.download_and_extract(data_url )
    def download_custom( self , data_url , custom_download ):
        return self.download_and_extract(data_url )
    def extract( self , path , *args , **kwargs ):
        return path
    def get_recorded_sizes_checksums( self ):
        return {}
    def create_dummy_data_dict( self , path_to_dummy_data , data_url ):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls , list ):
                    for single_url in single_urls:
                        download_callback(single_url )
                else:
                    single_url = single_urls
                    download_callback(single_url )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls , list ):
                value = [os.path.join(path_to_dummy_data , urllib.parse.quote_plus(Path(x ).name ) ) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data , urllib.parse.quote_plus(Path(single_url ).name ) )
            dummy_data_dict[key] = value
        # make sure that values are unique
        if all(isinstance(i , str ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
            dummy_data_dict.values() ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict
    def create_dummy_data_list( self , path_to_dummy_data , data_url ):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("""[0-9]{3,}-of-[0-9]{3,}""" , url ) ) for url in data_url )
        is_pubmed_records = all(
            url.startswith("""https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed""" ) for url in data_url )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url )
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data , urllib.parse.quote_plus(single_url.split("""/""" )[-1] ) )
            dummy_data_list.append(value )
        return dummy_data_list
    def create_dummy_data_single( self , path_to_dummy_data , data_url ):
        for download_callback in self.download_callbacks:
            download_callback(data_url )
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data , urllib.parse.quote_plus(data_url.split("""/""" )[-1] ) )
        if os.path.exists(value ) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data
    def delete_extracted_files( self ):
        pass
    def manage_extracted_files( self ):
        pass
    def iter_archive( self , path ):
        def _iter_archive_members(path ):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file ).parent
            relative_path = path.relative_to(dummy_parent_path )
            with ZipFile(self.local_path_to_dummy_data ) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix() ):
                    yield dummy_parent_path.joinpath(member )
        path = Path(path )
        file_paths = _iter_archive_members(path ) if self.use_local_dummy_data else path.rglob("""*""" )
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((""".""", """__""") ):
                yield file_path.relative_to(path ).as_posix(), file_path.open("""rb""" )
    def iter_files( self , paths ):
        if not isinstance(paths , list ):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path ):
                if os.path.basename(path ).startswith((""".""", """__""") ):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path ):
                    if os.path.basename(dirpath ).startswith((""".""", """__""") ):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames ):
                        if filename.startswith((""".""", """__""") ):
                            continue
                        yield os.path.join(dirpath , filename )
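# --- Added illustration (not in the original module) --------------------------
# Hedged sketch of the URL-to-dummy-path mapping used above: each URL is reduced
# to its quote_plus-encoded basename inside the dummy data folder. The URL and
# folder name below are illustrative.
def _example_dummy_path_mapping():
    url = "https://example.com/data/train.txt?rev=1"
    dummy_root = "dummy_data"
    return os.path.join(dummy_root , urllib.parse.quote_plus(Path(url ).name ) )
    # -> 'dummy_data/train.txt%3Frev%3D1'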
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json""",
    },
    """merges_file""": {
        """Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt""",
    },
    """tokenizer_file""": {
        """Salesforce/codegen-350M-mono""": (
            """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"""
        ),
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """Salesforce/codegen-350M-mono""": 2048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = CodeGenTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        if kwargs.pop("add_bos_token" , False ):
            model_id = kwargs.pop("name_or_path" , "" )
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token. "
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                F'`CodeGenTokenizer.from_pretrained(\'{model_id}\')`\nor\n'
                F'`AutoTokenizer.from_pretrained(\'{model_id}\', use_fast=False)`\n'
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly." )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type" ) )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def decode( self , token_ids , skip_special_tokens = False , clean_up_tokenization_spaces = None , truncate_before_pattern = None , **kwargs , ) -> str:
        '''simple docstring'''
        decoded_text = super().decode(
            token_ids=token_ids , skip_special_tokens=skip_special_tokens , clean_up_tokenization_spaces=clean_up_tokenization_spaces , **kwargs , )
        if truncate_before_pattern is not None and len(truncate_before_pattern ) > 0:
            decoded_text = self.truncate(decoded_text , truncate_before_pattern )
        return decoded_text
    def truncate( self , completion , truncate_before_pattern ) -> str:
        '''simple docstring'''
        def find_re(string , pattern , start_pos ):
            m = pattern.search(string , start_pos )
            return m.start() if m else -1
        terminals = [re.compile(pattern , re.MULTILINE ) for pattern in truncate_before_pattern]
        prints = list(re.finditer("^print" , completion , re.MULTILINE ) )
        if len(prints ) > 1:
            completion = completion[: prints[1].start()]
        defs = list(re.finditer("^def" , completion , re.MULTILINE ) )
        if len(defs ) > 1:
            completion = completion[: defs[1].start()]
        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion , terminal , start_pos ) for terminal in terminals] if pos != -1
        ]
        if len(terminals_pos ) > 0:
            return completion[: min(terminals_pos )]
        else:
            return completion
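# --- Added illustration (not in the original module) --------------------------
# Hedged sketch of the truncation rule above, on a plain string: generation is
# cut at the second top-level `def` (and analogously for `print` or any
# user-supplied pattern).
def _example_truncate_at_second_def():
    completion = "def a():\n    pass\ndef b():\n    pass\n"
    defs = list(re.finditer("^def" , completion , re.MULTILINE ) )
    return completion[: defs[1].start()] if len(defs ) > 1 else completion
    # -> 'def a():\n    pass\n'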
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected" , [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10 )]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i , i + 1 ) for i in range(10 )]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1 )]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
    ] , )
def test_distribute_shards(kwargs , expected ):
    out = _distribute_shards(**kwargs )
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected" , [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ] , )
def test_split_gen_kwargs(gen_kwargs , max_num_jobs , expected ):
    out = _split_gen_kwargs(gen_kwargs , max_num_jobs )
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, expected" , [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs , expected ):
    if expected is RuntimeError:
        with pytest.raises(expected ):
            _number_of_shards_in_gen_kwargs(gen_kwargs )
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs )
        assert out == expected
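# --- Added worked example (not in the original test file) ---------------------
# Hedged illustration of the contiguous-split behaviour asserted above: 10
# shards over at most 3 jobs yields ranges of sizes 4, 3 and 3.
def _example_distribute_shards():
    out = _distribute_shards(num_shards=10 , max_num_jobs=3 )
    assert out == [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]
    return out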
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
        """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """xlnet-base-cased""": None,
    """xlnet-large-cased""": None,
}
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = 'left'
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ):
        '''simple docstring'''
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ):
        '''simple docstring'''
        return len(self.sp_model )
    def get_vocab( self ):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def preprocess_text( self , inputs ):
        '''simple docstring'''
        if self.remove_space:
            outputs = " ".join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace("``" , "\"" ).replace("\'\'" , "\"" )
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD" , outputs )
            outputs = "".join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self , text ):
        '''simple docstring'''
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , "" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
    def _convert_token_to_id( self , token ):
        '''simple docstring'''
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ):
        '''simple docstring'''
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , " " ).strip()
        return out_string
    def _decode( self , token_ids , skip_special_tokens = False , clean_up_tokenization_spaces = None , spaces_between_special_tokens = True , **kwargs , ):
        '''simple docstring'''
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer" , False )
        filtered_tokens = self.convert_ids_to_tokens(token_ids , skip_special_tokens=skip_special_tokens )
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
                    current_sub_text = []
                sub_texts.append(token )
            else:
                current_sub_text.append(token )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts )
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text )
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1, 1]
        return ([0] * len(token_ids_0 )) + [1, 1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0] + cls_segment_id
        return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
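# --- Added illustration (not in the original module) --------------------------
# Hedged sketch of the layout built above: unlike BERT, XLNet appends its
# special tokens at the END of the sequence, and the <cls> position gets
# segment id 2. The ids below are illustrative.
def _example_xlnet_token_type_ids():
    token_ids_a, token_ids_b = [10, 11], [20]
    sep, cls_segment_id = [0], [2]
    return len(token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1] + cls_segment_id
    # -> [0, 0, 0, 1, 1, 2]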
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""uclanlp/visualbert-vqa""": """https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-pre""": """https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-vcr""": """https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-pre""": """https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-nlvr2""": """https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-pre""": """https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"""
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig ):
    model_type = 'visual_bert'
    def __init__( self , vocab_size=30522 , hidden_size=768 , visual_embedding_dim=512 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , bypass_transformer=False , special_visual_initialize=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
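# --- Added usage sketch (not in the original module) --------------------------
# Hedged example: the defaults above mirror BERT-base; visual_embedding_dim is
# the VisualBERT-specific knob (1024 below is illustrative).
def _example_visual_bert_config():
    config = VisualBertConfig(visual_embedding_dim=1024 )
    return config.visual_embedding_dim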
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase ):
    def test_all_is_compatible( self ):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames ) )
    def test_diffusers_model_is_compatible( self ):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames ) )
    def test_diffusers_model_is_not_compatible( self ):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames ) )
    def test_transformer_model_is_compatible( self ):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames ) )
    def test_transformer_model_is_not_compatible( self ):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames ) )
    def test_all_is_compatible_variant( self ):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
    def test_diffusers_model_is_compatible_variant( self ):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
    def test_diffusers_model_is_compatible_variant_partial( self ):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
    def test_diffusers_model_is_not_compatible_variant( self ):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames , variant=variant ) )
    def test_transformer_model_is_compatible_variant( self ):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
    def test_transformer_model_is_compatible_variant_partial( self ):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
    def test_transformer_model_is_not_compatible_variant( self ):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames , variant=variant ) )
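# --- Added illustration (not in the original test file) -----------------------
# Hedged, simplified sketch of the rule the cases above exercise: a repo is
# "compatible" when every torch .bin weight has a .safetensors counterpart.
# The real helper also maps "pytorch_model" stems to "model" and handles fp16
# variants, which this sketch deliberately skips.
def _example_compat_rule():
    filenames = [
        "unet/diffusion_pytorch_model.bin",
        "unet/diffusion_pytorch_model.safetensors",
    ]
    bins = {f[: -len(".bin" )] for f in filenames if f.endswith(".bin" )}
    safes = {f[: -len(".safetensors" )] for f in filenames if f.endswith(".safetensors" )}
    return bins <= safes  # True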
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available() -> bool:
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv('SM_HP_MP_PARAMETERS' , '{}' )
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options )
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False
    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv('SM_FRAMEWORK_PARAMS' , '{}' )
    try:
        # Parse it and check the field "sagemaker_mpi_enabled" is included.
        mpi_options = json.loads(mpi_options )
        if not mpi_options.get('sagemaker_mpi_enabled' , False ):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec('smdistributed' ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments ):
    """simple docstring"""
    mp_parameters: str = field(
        default="""""" , metadata={"""help""": """Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"""} , )
    def __post_init__( self ):
"""simple docstring"""
super().__post_init__()
        warnings.warn(
            '`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use '
            '`TrainingArguments` instead.' , FutureWarning , )
    @cached_property
    def _setup_devices( self ) -> "torch.device":
        """simple docstring"""
        logger.info('PyTorch: setting up devices' )
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                'torch.distributed process group is initialized, but local_rank == -1. '
                'In order to use Torch DDP, launch your script with `python -m torch.distributed.launch`' )
        if self.no_cuda:
            device = torch.device('cpu' )
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device('cuda' , local_rank )
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401
            torch.distributed.init_process_group(backend='smddp' , timeout=self.ddp_timeout_delta )
            self.local_rank = int(os.getenv('SMDATAPARALLEL_LOCAL_RANK' ) )
            device = torch.device('cuda' , self.local_rank )
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend='nccl' , timeout=self.ddp_timeout_delta )
            device = torch.device('cuda' , self.local_rank )
            self._n_gpu = 1
        if device.type == "cuda":
            torch.cuda.set_device(device )
        return device
@property
    def world_size( self ):
"""simple docstring"""
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
    def place_model_on_device( self ):
"""simple docstring"""
return not is_sagemaker_model_parallel_available()
@property
    def _no_sync_in_gradient_accumulation( self ):
"""simple docstring"""
return False
__author__ = '''Tobias Carryer'''
from time import time
class LinearCongruentialGenerator:
    """simple docstring"""
    def __init__( self , multiplier , increment , modulo , seed=int(time() ) ):  # noqa: B008
        """simple docstring"""
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed
    def next_number( self ):
        """simple docstring"""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
if __name__ == "__main__":
# Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
while True:
print(lcg.next_number())
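# --- Added worked example (not in the original module) ------------------------
# Hedged illustration of the recurrence seed' = (a * seed + c) % m with the
# constants above (widely attributed to Numerical Recipes). With seed=0 the
# first draw is simply the increment.
def _example_lcg_first_value():
    lcg = LinearCongruentialGenerator(1664525 , 1013904223 , 2 << 31 , seed=0 )
    return lcg.next_number()  # (1664525 * 0 + 1013904223) % 2**32 == 1013904223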
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict ) -> tuple:
    return (data["data"], data["target"])
def xgboost(features: np.ndarray , target: np.ndarray , test_features: np.ndarray ) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0 , random_state=42 )
    xgb.fit(features , target )
    # Predict target for test data
    predictions = xgb.predict(test_features )
    predictions = predictions.reshape(len(predictions ) , 1 )
    return predictions
def main() -> None:
    # Load the California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california )
    x_train, x_test, y_train, y_test = train_test_split(
        data , target , test_size=0.25 , random_state=1 )
    predictions = xgboost(x_train , y_train , x_test )
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test , predictions )}" )
    print(f"Mean Square Error : {mean_squared_error(y_test , predictions )}" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 502
|
'''simple docstring'''
def get_set_bits_count(number: int) -> int:
    """Count the number of set bits (1s) in the binary representation of a number."""
    if not isinstance(number, int):
        raise TypeError("Input value must be a 'int' type")
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    return bin(number).count("1")
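# Example: get_set_bits_count(25) == 3, since bin(25) == "0b11001".
# On Python 3.10+ the same result is available directly as (25).bit_count().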
if __name__ == "__main__":
import doctest
doctest.testmod()
| 502
| 1
|
'''simple docstring'''
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    "Context manager to hide the terminal cursor"
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
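# Minimal usage sketch (not part of the original module): the context manager
# restores the cursor even if the wrapped block raises.
#
#     with hide():
#         run_interactive_menu()  # hypothetical caller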
| 318
|
'''simple docstring'''
from types import MethodType
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8


# conversion between bit representations and images, adapted from
# https://github.com/lucidrains/bit-diffusion/blob/main/bit_diffusion/bit_diffusion.py
def decimal_to_bits(x, bits=BITS):
    """expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1"""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits


def bits_to_decimal(x, bits=BITS):
    """expects bits from -1 to 1, outputs image tensor from 0 to 1"""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
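# Round-trip sketch for the two helpers above: an image tensor x in [0, 1] is
# quantized to 8 bits, so decoding recovers it up to 1/255:
#
#     x = torch.rand(1, 3, 8, 8)
#     assert torch.equal(bits_to_decimal(decimal_to_bits(x)), (x * 255).int() / 255)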
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    """DDIM step, with "predicted x_0" clipped to [-bit_scale, bit_scale] instead of [-1, 1]."""
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>)
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod

    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance**0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    """DDPM step, with "predicted x_0" clipped to [-bit_scale, bit_scale] instead of [-1, 1]."""
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev**0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        # NOTE: the step functions above read `self.bit_scale` with `self` being the
        # scheduler, so mirror the value there and bind them as instance methods.
        scheduler.bit_scale = bit_scale
        scheduler.step = MethodType(
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step, scheduler
        )

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
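# Minimal usage sketch (names and settings illustrative, not from the original file):
# the UNet's in/out channels must equal the number of bit planes (3 * BITS = 24 for
# RGB at 8 bits), and either a DDIM or DDPM scheduler can be plugged in.
#
#     unet = UNet2DConditionModel(in_channels=24, out_channels=24, ...)
#     scheduler = DDIMScheduler(clip_sample=True)
#     pipe = BitDiffusion(unet, scheduler, bit_scale=1.0)
#     image = pipe(height=256, width=256, num_inference_steps=50).images[0]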
| 318
| 1
|
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class MultiTPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
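        # Context note: `xla_spawn.py` mirrors `torch.distributed.launch` for TPUs,
        # spawning one Python process per TPU core (8 here) via torch_xla before
        # each process runs `test_script.py`.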
| 40
|
def is_palindrome(num: int) -> bool:
    """Return True if `num` reads the same backwards, e.g. 121 or 3443."""
    if num < 0:
        return False

    num_copy: int = num
    rev_num: int = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num
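# Examples: is_palindrome(121) -> True, is_palindrome(123) -> False,
# and negative numbers are never palindromes: is_palindrome(-121) -> False.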
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681
| 0
|
values = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
10: '''a''',
11: '''b''',
12: '''c''',
13: '''d''',
14: '''e''',
15: '''f''',
}
def decimal_to_hexadecimal(decimal: float) -> str:
    """Take an integer-valued decimal (int, or float equal to an int) and return its hexadecimal string."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
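# Examples (consistent with the built-in hex()):
#     decimal_to_hexadecimal(255) == "0xff" == hex(255)
#     decimal_to_hexadecimal(-256) == "-0x100" == hex(-256)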
if __name__ == "__main__":
import doctest
doctest.testmod()
| 710
|
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
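# Context note: this placeholder is imported in place of the real MIDI processor
# when the `note_seq` package is missing; constructing it raises an informative
# error via `requires_backends` instead of failing at import time.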
| 332
| 0
|
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the price including tax, e.g. price_plus_tax(100, 0.25) == 125.0."""
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(f"{price_plus_tax(100, 0.25) = }")
print(f"{price_plus_tax(125.50, 0.05) = }")
| 380
|
'''simple docstring'''
TEXT_TO_IMAGE_PARAMS = frozenset(
    [
        "prompt",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])

IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(["image"])

IMAGE_VARIATION_PARAMS = frozenset(
    [
        "image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_VARIATION_BATCH_PARAMS = frozenset(["image"])

TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
    [
        "prompt",
        "image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(["prompt", "image", "negative_prompt"])

TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # Text guided image variation with an image mask
        "prompt",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["prompt", "image", "mask_image", "negative_prompt"])

IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # image variation with an image mask
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["image", "mask_image"])

IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        "example_image",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["example_image", "image", "mask_image"])

CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(["class_labels"])

CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(["class_labels"])

UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])

UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])

TEXT_TO_AUDIO_PARAMS = frozenset(
    [
        "prompt",
        "audio_length_in_s",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(["input_tokens"])

TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(["input_tokens"])
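# Context note: these frozensets are consumed by the common pipeline test mixin,
# which asserts that each pipeline's `__call__` accepts the parameters declared
# for its task (`*_PARAMS`) and batches correctly over the `*_BATCH_PARAMS` subset.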
| 94
| 0
|
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
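# Context note: the subclass exists only to keep old `SegformerFeatureExtractor`
# imports working; all behavior is inherited from SegformerImageProcessor, and the
# deprecation warning fires once per construction.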
| 382
|
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """
    Apply the impedance triangle |Z|^2 = R^2 + X^2: given any two of resistance,
    reactance and impedance (pass 0 for the unknown one), return the third.
    """
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
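# Example (3-4-5 triangle): electrical_impedance(3, 4, 0) -> {"impedance": 5.0},
# and solving for a side instead: electrical_impedance(0, 4, 5) -> {"resistance": 3.0}.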
if __name__ == "__main__":
import doctest
doctest.testmod()
| 382
| 1
|
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('''ignore''', category=UserWarning, module='''torch.optim.lr_scheduler''')
class AcceleratedScheduler:
    """
    A wrapper around a learning rate scheduler that only steps when the optimizer(s)
    actually took a training step, e.g. to avoid racing ahead when a mixed-precision
    step was skipped because of gradient overflow.
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
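# Usage sketch (assumed setup, not from the original module): this wrapper is
# normally created by `Accelerator.prepare` rather than by hand, e.g.
#
#     scheduler = get_linear_schedule_with_warmup(optimizer, 100, 1000)
#     model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
#
# after which `scheduler.step()` only advances when the optimizer actually stepped.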
| 415
|
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
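# Note on the "l0" branch above: the mask is the hard-concrete ("stretched
# sigmoid") relaxation of Louizos et al. (2018), s_bar = sigmoid(scores) * (r - l) + l
# with (l, r) = (-0.1, 1.1), clamped to [0, 1], so most entries end up exactly 0 or 1.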
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder containing the model that was previously fine-pruned''',
)
    args = parser.parse_args()
main(args)
| 415
| 1
|
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key

    return orig_key


def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
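# Note: the "+ 2" offset on position_ids mirrors RoBERTa-style embeddings, where
# real positions start at padding_idx + 1 = 2 because index 1 is the padding token.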
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to YOSO pytorch checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for YOSO model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 189
|
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0):
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self):
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self):
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]):
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float):
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix):
        """
        Apply the Sherman-Morrison formula to compute (A + uv^T)^(-1),
        where `self` plays the role of A^(-1) and u, v are column vectors.
        """
        # Size validation
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
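# The update in `sherman_morrison` implements the identity
#     (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
# with `self` playing the role of A^(-1), which is why no explicit inversion is needed.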
| 189
| 1
|
"""simple docstring"""
import copy
import re
class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")

        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]

        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]

        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname

        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return

        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }

        field_keys = list(cls.DEFAULTS.keys())

        for k in field_keys:
            cls.add_new_param_name(info, k)

        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]

        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue

            key = cls.NAMING_INFO["short_param"][k]

            if isinstance(v, bool):
                v = 1 if v else 0

            sep = "" if isinstance(v, (int, float)) else "-"
            e = f"{key}{sep}{v}"
            name.append(e)

        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")

        parameters = {}

        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))

            key = cls.NAMING_INFO["reverse_short_param"][p_k]

            parameters[key] = p_v

        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]

        return parameters
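# Usage sketch (subclass and values illustrative, not from the original module):
#
#     class RunNamer(TrialShortNamer):
#         PREFIX = "run"
#         DEFAULTS = {"learning_rate": 1e-5, "batch_size": 8}
#
#     RunNamer.shortname({"learning_rate": 3e-05, "batch_size": 8})
#     # -> "run_lr3e-05": defaults are omitted and "learning_rate" shrinks to "lr"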
| 91
|
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            # left padding: the slice below is a reconstruction (the original indexing
            # was lost); it places the values at the end of the row
            if isinstance(padding_value, tuple):
                out_tensor[i, -len(tensor[:sequence_length]) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, -len(tensor[:sequence_length]) :] = tensor[:sequence_length]

    return out_tensor.tolist()


def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False


@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    """
    Data collator that dynamically pads the inputs received, as well as the labels,
    NER tags and original entity spans produced by the LUKE tokenizer.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors would fail here since the labels are not yet all the same length.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
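# Context note: `tokenizer.pad` only pads the standard tokenizer outputs; the
# ragged per-example `ner_tags` and `original_entity_spans` lists are therefore
# padded manually with `padding_tensor` above.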
| 405
| 0
|
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
super().__init__()
if safety_checker is None:
logger.warning(
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
self.register_modules(
speech_model=snake_case__ , speech_processor=snake_case__ , vae=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , unet=snake_case__ , scheduler=snake_case__ , feature_extractor=snake_case__ , )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
def __call__( self , snake_case__ , snake_case__=1_6000 , snake_case__ = 512 , snake_case__ = 512 , snake_case__ = 50 , snake_case__ = 7.5 , snake_case__ = None , snake_case__ = 1 , snake_case__ = 0.0 , snake_case__ = None , snake_case__ = None , snake_case__ = "pil" , snake_case__ = True , snake_case__ = None , snake_case__ = 1 , **snake_case__ , ):
lowerCAmelCase : List[str] = self.speech_processor.feature_extractor(
snake_case__ , return_tensors='pt' , sampling_rate=snake_case__ ).input_features.to(self.device )
lowerCAmelCase : Optional[Any] = self.speech_model.generate(snake_case__ , max_length=48_0000 )
lowerCAmelCase : str = self.speech_processor.tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ , normalize=snake_case__ )[
0
]
if isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[int] = 1
elif isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[int] = len(snake_case__ )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(snake_case__ )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(snake_case__ , snake_case__ ) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(snake_case__ )}." )
# get prompt text embeddings
lowerCAmelCase : str = self.tokenizer(
snake_case__ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
lowerCAmelCase : Tuple = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowerCAmelCase : str = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
lowerCAmelCase : Union[str, Any] = text_input_ids[:, : self.tokenizer.model_max_length]
lowerCAmelCase : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowerCAmelCase : int = text_embeddings.shape
lowerCAmelCase : Any = text_embeddings.repeat(1 , snake_case__ , 1 )
lowerCAmelCase : Optional[int] = text_embeddings.view(bs_embed * num_images_per_prompt , snake_case__ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCAmelCase : List[str] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCAmelCase : List[str]
if negative_prompt is None:
lowerCAmelCase : Any = [''] * batch_size
elif type(snake_case__ ) is not type(snake_case__ ):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(snake_case__ )} !="
f" {type(snake_case__ )}." )
elif isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase : Union[str, Any] = [negative_prompt]
elif batch_size != len(snake_case__ ):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(snake_case__ )}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
' the batch size of `prompt`.' )
else:
lowerCAmelCase : Dict = negative_prompt
lowerCAmelCase : Optional[int] = text_input_ids.shape[-1]
lowerCAmelCase : int = self.tokenizer(
snake_case__ , padding='max_length' , max_length=snake_case__ , truncation=snake_case__ , return_tensors='pt' , )
lowerCAmelCase : Union[str, Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCAmelCase : List[Any] = uncond_embeddings.shape[1]
lowerCAmelCase : List[str] = uncond_embeddings.repeat(1 , snake_case__ , 1 )
lowerCAmelCase : Optional[Any] = uncond_embeddings.view(batch_size * num_images_per_prompt , snake_case__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCAmelCase : List[str] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCAmelCase : Union[str, Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowerCAmelCase : Dict = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowerCAmelCase : str = torch.randn(snake_case__ , generator=snake_case__ , device='cpu' , dtype=snake_case__ ).to(
self.device )
else:
lowerCAmelCase : Tuple = torch.randn(snake_case__ , generator=snake_case__ , device=self.device , dtype=snake_case__ )
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
lowerCAmelCase : str = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(snake_case__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowerCAmelCase : Union[str, Any] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCAmelCase : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCAmelCase : Tuple = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCAmelCase : Union[str, Any] = {}
if accepts_eta:
lowerCAmelCase : int = eta
for i, t in enumerate(self.progress_bar(snake_case__ ) ):
# expand the latents if we are doing classifier free guidance
lowerCAmelCase : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCAmelCase : Tuple = self.scheduler.scale_model_input(snake_case__ , snake_case__ )
# predict the noise residual
lowerCAmelCase : List[str] = self.unet(snake_case__ , snake_case__ , encoder_hidden_states=snake_case__ ).sample
# perform guidance
if do_classifier_free_guidance:
lowerCAmelCase : Dict = noise_pred.chunk(2 )
lowerCAmelCase : Tuple = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase : int = self.scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(snake_case__ , snake_case__ , snake_case__ )
lowerCAmelCase : List[Any] = 1 / 0.1_8_2_1_5 * latents
lowerCAmelCase : Dict = self.vae.decode(snake_case__ ).sample
lowerCAmelCase : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCAmelCase : Dict = self.numpy_to_pil(snake_case__ )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=snake_case__ , nsfw_content_detected=snake_case__ )
| 711
|
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=13_37 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
| 646
| 0
|
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    """Text-model output that also carries the projected ("transformation") states."""

    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(
        self,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        project_dim=512,
        pooler_fn="cls",
        learn_encoder=False,
        use_attention_mask=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask


class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )

        if self.has_pre_transformation:
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
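# Context note: this wrapper is the text encoder used by the AltDiffusion pipeline;
# `projection_state` (hidden states projected to `project_dim`) is consumed in place
# of CLIP's text embeddings.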
| 445
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 368
| 0
|
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to the large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a model 50 times smaller than this one, see `fsmt-make-super-tiny-model.py`, which is slightly more complicated.
#
#
# It will then be used as "stas/tiny-wmt19-en-de"

# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = "facebook/wmt19-en-de"

tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
    dict(
        d_model=4,
        encoder_layers=1,
        decoder_layers=1,
        encoder_ffn_dim=4,
        decoder_ffn_dim=4,
        encoder_attention_heads=1,
        decoder_attention_heads=1,
    )
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))

# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-de
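
# Once uploaded, the tiny checkpoint is consumed like any other hub model. A
# minimal sketch (assuming it was pushed under the hub id noted above):
#
#   from transformers import FSMTForConditionalGeneration, FSMTTokenizer
#
#   tok = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-de")
#   model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-de")
#   out = model.generate(**tok(["hi"], return_tensors="pt"))
#   # the translation is meaningless -- the point is only that the machinery runs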
| 702
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_pad=True):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width


@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "rescale_factor"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_pad"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
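
# A minimal end-to-end sketch of the processor exercised above, using the same
# checkpoint id and fixture path as the slow test (a sketch, not part of the suite):
#
#   from PIL import Image
#   from transformers import DetrImageProcessor
#
#   processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
#   image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
#   inputs = processor(images=image, return_tensors="pt")
#   # returns pixel_values (and a pixel_mask when padding is enabled)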
| 108
| 0
|
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph, vert, visited):
    """Order the vertices of `graph` by decreasing depth-first finish time."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph, vert, visited):
    """Collect every vertex reachable from `vert` in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph):
    """Kosaraju's algorithm: DFS on the graph, then DFS on the reversed graph."""
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
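

if __name__ == "__main__":
    # Quick check on the two sample graphs above; Kosaraju's two passes should
    # recover each directed cycle as a single component.
    print(strongly_connected_components(test_graph_1))  # groups {0, 1, 2}, {3}, {4}
    print(strongly_connected_components(test_graph_2))  # groups {0, 1, 2}, {3, 4, 5}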
| 542
|
"""simple docstring"""
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class __UpperCAmelCase (__A ):
'''simple docstring'''
def __init__( self , snake_case_="" , snake_case_="train" ):
'''simple docstring'''
assert os.path.isdir(snake_case_ )
A__ : Dict = []
A__ : Optional[Any] = os.listdir(snake_case_ )
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
A__ : Any = os.path.join(snake_case_ , snake_case_ )
if not os.path.isfile(snake_case_ ):
continue
self.documents.append(snake_case_ )
def __len__( self ):
'''simple docstring'''
return len(self.documents )
def __getitem__( self , snake_case_ ):
'''simple docstring'''
A__ : List[str] = self.documents[idx]
A__ : List[Any] = document_path.split("""/""" )[-1]
with open(snake_case_ , encoding="""utf-8""" ) as source:
A__ : Dict = source.read()
A__ , A__ : Dict = process_story(snake_case_ )
return document_name, story_lines, summary_lines
def _A( lowerCAmelCase ):
A__ : Optional[int] = list(filter(lambda lowerCAmelCase : len(lowerCAmelCase ) != 0 , [line.strip() for line in raw_story.split("""\n""" )] ) )
# for some unknown reason some lines miss a period, add it
A__ : List[str] = [_add_missing_period(lowerCAmelCase ) for line in nonempty_lines]
# gather article lines
A__ : Union[str, Any] = []
A__ : Optional[Any] = deque(lowerCAmelCase )
while True:
try:
A__ : List[str] = lines.popleft()
if element.startswith("""@highlight""" ):
break
story_lines.append(lowerCAmelCase )
except IndexError:
# if "@highlight" is absent from the file we pop
# all elements until there is None, raising an exception.
return story_lines, []
# gather summary lines
A__ : List[str] = list(filter(lambda lowerCAmelCase : not t.startswith("""@highlight""" ) , lowerCAmelCase ) )
return story_lines, summary_lines
def _A( lowerCAmelCase ):
A__ : Tuple = [""".""", """!""", """?""", """...""", """'""", """`""", """\"""", """\u2019""", """\u2019""", """)"""]
if line.startswith("""@highlight""" ):
return line
if line[-1] in END_TOKENS:
return line
return line + "."
def _A( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if len(lowerCAmelCase ) > block_size:
return sequence[:block_size]
else:
sequence.extend([pad_token_id] * (block_size - len(lowerCAmelCase )) )
return sequence
def _A( lowerCAmelCase , lowerCAmelCase ):
A__ : Any = torch.ones_like(lowerCAmelCase )
A__ : Any = sequence == pad_token_id
A__ : Any = 0
return mask
def _A( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
A__ : Dict = [tokenizer.encode(lowerCAmelCase ) for line in story_lines]
A__ : Any = [token for sentence in story_lines_token_ids for token in sentence]
A__ : List[str] = [tokenizer.encode(lowerCAmelCase ) for line in summary_lines]
A__ : int = [token for sentence in summary_lines_token_ids for token in sentence]
return story_token_ids, summary_token_ids
def _A( lowerCAmelCase , lowerCAmelCase ):
A__ : str = []
for sequence in batch:
A__ : List[Any] = -1
A__ : Dict = []
for s in sequence:
if s == separator_token_id:
sentence_num += 1
embeddings.append(sentence_num % 2 )
batch_embeddings.append(lowerCAmelCase )
return torch.tensor(lowerCAmelCase )
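

# How the helpers above compose, as a sketch: encode one example, fit it to a
# fixed block, then derive the attention mask and token type ids. The tokenizer
# and the 512 block size are illustrative assumptions, not fixed by this module.
#
#   from transformers import BertTokenizer
#
#   tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
#   story_ids, summary_ids = encode_for_summarization(story_lines, summary_lines, tokenizer)
#   story_ids = fit_to_block_size(story_ids, 512, tokenizer.pad_token_id)
#   batch = torch.tensor([story_ids])
#   attention_mask = build_mask(batch, tokenizer.pad_token_id)
#   token_type_ids = compute_token_type_ids(batch, tokenizer.sep_token_id)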
| 363
| 0
|
def trapezoidal_rule(boundary, steps):
    """Approximate the integral of f over `boundary` with the composite trapezoidal rule."""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = trapezoidal_rule(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
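    # Quick sanity check (a sketch): for f(x) = x * x on [0, 1] the exact integral
    # is 1/3; with 10 steps the trapezoidal estimate above lands near 0.335.
    assert abs(trapezoidal_rule([0.0, 1.0], 10.0) - 1.0 / 3.0) < 0.01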
| 711
|
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]])
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 448
| 0
|
from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
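
    # A small self-check (a sketch): three successive 90-degree rotations match one
    # 270-degree rotation, and four return the original matrix.
    matrix = make_matrix()
    assert rotate_270(matrix) == rotate_90(rotate_90(rotate_90(matrix)))
    assert matrix == rotate_90(rotate_90(rotate_90(rotate_90(matrix))))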
| 403
|
def counting_sort(collection):
    # if the collection is empty, returns empty
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how many times each number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered


def counting_sort_string(string):
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
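
# Complexity sketch: with n elements spanning k distinct values, the passes above
# run in O(n + k) time with O(k) extra space, and stability is preserved, e.g.
#   counting_sort([4, 1, 3, 1]) -> [1, 1, 3, 4]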
| 403
| 1
|
from __future__ import annotations


def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 496
|
from __future__ import annotations

from collections import namedtuple


def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Compute the missing one of voltage, current and power (pass it as 0)."""
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system")
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
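
# Usage sketch: pass the unknown quantity as 0 and read the namedtuple back.
#   electric_power(voltage=0, current=2, power=4)  -> result(name='voltage', value=2.0)
#   electric_power(voltage=2, current=2, power=0)  -> result(name='power', value=4.0)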
| 496
| 1
|
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    PNDMScheduler,
    StableDiffusionLDM3DPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS


enable_full_determinism()


class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDM3DPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=6, out_channels=6, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262])
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])
        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]
        text_inputs = ldm3d_pipe.tokenizer(
            prompt, padding="max_length", max_length=ldm3d_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", )
        text_inputs = text_inputs["input_ids"].to(torch_device)
        prompt_embeds = ldm3d_pipe.text_encoder(text_inputs)[0]
        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]

        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldm3d_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217])
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2


@slow
@require_torch_gpu
class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d_stable_diffusion(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d")
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = rgb[0, -3:, -1].flatten()  # note: the reference slice below matches this indexing

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)

        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706])
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706])
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3


@nightly
@require_torch_gpu
class StableDiffusionLDM3DPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3

    def test_ldm3d_4c(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
| 70
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width


@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")
        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4))

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 406
| 0
|
def topological_sort(graph):
    """Kahn's algorithm: repeatedly emit a vertex whose in-degree has dropped to zero."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
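
# Expected output for the adjacency list above: [0, 1, 2, 3, 4, 5]. Any ordering
# that respects every edge would be an equally valid topological sort.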
| 709
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class lowercase ( a_ ):
_lowerCamelCase : torch.FloatTensor
class lowercase ( a_, a_ ):
@register_to_config
def __init__( self , _snake_case = 6_5536 , _snake_case = None , _snake_case = 2 , _snake_case = 2 , _snake_case = 0 , _snake_case = "fourier" , _snake_case = True , _snake_case = False , _snake_case = 0.0 , _snake_case = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , _snake_case = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , _snake_case = "UNetMidBlock1D" , _snake_case = None , _snake_case = (32, 32, 64) , _snake_case = None , _snake_case = 8 , _snake_case = 1 , _snake_case = False , ) -> List[str]:
super().__init__()
UpperCAmelCase_ : Optional[Any] = sample_size
# time
if time_embedding_type == "fourier":
UpperCAmelCase_ : Tuple = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=_snake_case , log=_snake_case , flip_sin_to_cos=_snake_case)
UpperCAmelCase_ : int = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
UpperCAmelCase_ : Optional[Any] = Timesteps(
block_out_channels[0] , flip_sin_to_cos=_snake_case , downscale_freq_shift=_snake_case)
UpperCAmelCase_ : List[Any] = block_out_channels[0]
if use_timestep_embedding:
UpperCAmelCase_ : Dict = block_out_channels[0] * 4
UpperCAmelCase_ : List[Any] = TimestepEmbedding(
in_channels=_snake_case , time_embed_dim=_snake_case , act_fn=_snake_case , out_dim=block_out_channels[0] , )
UpperCAmelCase_ : int = nn.ModuleList([])
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : Optional[int] = nn.ModuleList([])
UpperCAmelCase_ : Any = None
# down
UpperCAmelCase_ : Dict = in_channels
for i, down_block_type in enumerate(_snake_case):
UpperCAmelCase_ : int = output_channel
UpperCAmelCase_ : Optional[int] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
UpperCAmelCase_ : int = i == len(_snake_case) - 1
UpperCAmelCase_ : Any = get_down_block(
_snake_case , num_layers=_snake_case , in_channels=_snake_case , out_channels=_snake_case , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(_snake_case)
# mid
UpperCAmelCase_ : Optional[int] = get_mid_block(
_snake_case , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=_snake_case , add_downsample=_snake_case , )
# up
UpperCAmelCase_ : Union[str, Any] = list(reversed(_snake_case))
UpperCAmelCase_ : Optional[int] = reversed_block_out_channels[0]
if out_block_type is None:
UpperCAmelCase_ : Tuple = out_channels
else:
UpperCAmelCase_ : int = block_out_channels[0]
for i, up_block_type in enumerate(_snake_case):
UpperCAmelCase_ : Dict = output_channel
UpperCAmelCase_ : Optional[Any] = (
reversed_block_out_channels[i + 1] if i < len(_snake_case) - 1 else final_upsample_channels
)
UpperCAmelCase_ : str = i == len(_snake_case) - 1
UpperCAmelCase_ : Union[str, Any] = get_up_block(
_snake_case , num_layers=_snake_case , in_channels=_snake_case , out_channels=_snake_case , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(_snake_case)
UpperCAmelCase_ : Dict = output_channel
# out
UpperCAmelCase_ : Union[str, Any] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32)
UpperCAmelCase_ : Any = get_out_block(
out_block_type=_snake_case , num_groups_out=_snake_case , embed_dim=block_out_channels[0] , out_channels=_snake_case , act_fn=_snake_case , fc_dim=block_out_channels[-1] // 4 , )
def _snake_case ( self , _snake_case , _snake_case , _snake_case = True , ) -> Union[UNetaDOutput, Tuple]:
UpperCAmelCase_ : Union[str, Any] = timestep
if not torch.is_tensor(_snake_case):
UpperCAmelCase_ : Union[str, Any] = torch.tensor([timesteps] , dtype=torch.long , device=sample.device)
elif torch.is_tensor(_snake_case) and len(timesteps.shape) == 0:
UpperCAmelCase_ : Tuple = timesteps[None].to(sample.device)
UpperCAmelCase_ : Any = self.time_proj(_snake_case)
if self.config.use_timestep_embedding:
UpperCAmelCase_ : int = self.time_mlp(_snake_case)
else:
UpperCAmelCase_ : int = timestep_embed[..., None]
UpperCAmelCase_ : List[Any] = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
UpperCAmelCase_ : int = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))
# 2. down
UpperCAmelCase_ : Optional[Any] = ()
for downsample_block in self.down_blocks:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = downsample_block(hidden_states=_snake_case , temb=_snake_case)
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
UpperCAmelCase_ : List[Any] = self.mid_block(_snake_case , _snake_case)
# 4. up
for i, upsample_block in enumerate(self.up_blocks):
UpperCAmelCase_ : int = down_block_res_samples[-1:]
UpperCAmelCase_ : Tuple = down_block_res_samples[:-1]
UpperCAmelCase_ : List[Any] = upsample_block(_snake_case , res_hidden_states_tuple=_snake_case , temb=_snake_case)
# 5. post-process
if self.out_block:
UpperCAmelCase_ : Optional[Any] = self.out_block(_snake_case , _snake_case)
if not return_dict:
return (sample,)
return UNetaDOutput(sample=_snake_case)
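# A minimal sketch of the sinusoidal timestep projection that the forward pass
# above relies on before broadcasting the embedding across the sample length.
# This is an illustrative stand-in, not the actual diffusers `Timesteps` module;
# the frequency schedule follows the common DDPM convention.
import math

import torch


def sinusoidal_timestep_embedding(timesteps: torch.Tensor, dim: int) -> torch.Tensor:
    # timesteps: (batch,) integer diffusion steps; returns (batch, dim)
    half_dim = dim // 2
    freqs = torch.exp(-math.log(10000.0) * torch.arange(half_dim) / (half_dim - 1))
    args = timesteps[:, None].float() * freqs[None, :]
    return torch.cat([torch.sin(args), torch.cos(args)], dim=-1)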
| 471
| 0
|
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def a_ ( __a , __a ):
assert isinstance(__a , __a )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def a_ ( __a , __a , __a ):
A__ = tmp_path / '''cache'''
A__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A__ = ParquetDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read()
_check_parquet_dataset(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def a_ ( __a , __a , __a ):
A__ = tmp_path / '''cache'''
A__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
A__ = features.copy() if features else default_expected_features
A__ = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
A__ = ParquetDatasetReader(__a , features=__a , cache_dir=__a ).read()
_check_parquet_dataset(__a , __a )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def a_ ( __a , __a , __a ):
A__ = tmp_path / '''cache'''
A__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
A__ = ParquetDatasetReader(__a , cache_dir=__a , split=__a ).read()
_check_parquet_dataset(__a , __a )
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize('''path_type''' , [str, list] )
def a_ ( __a , __a , __a ):
if issubclass(__a , __a ):
A__ = parquet_path
elif issubclass(__a , __a ):
A__ = [parquet_path]
A__ = tmp_path / '''cache'''
A__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
A__ = ParquetDatasetReader(__a , cache_dir=__a ).read()
_check_parquet_dataset(__a , __a )
def a_ ( __a , __a , __a=("train",) ):
assert isinstance(__a , __a )
for split in splits:
A__ = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def a_ ( __a , __a , __a ):
A__ = tmp_path / '''cache'''
A__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A__ = ParquetDatasetReader(
{'''train''': parquet_path} , cache_dir=__a , keep_in_memory=__a ).read()
_check_parquet_datasetdict(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def a_ ( __a , __a , __a ):
A__ = tmp_path / '''cache'''
A__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
A__ = features.copy() if features else default_expected_features
A__ = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
A__ = ParquetDatasetReader({'''train''': parquet_path} , features=__a , cache_dir=__a ).read()
_check_parquet_datasetdict(__a , __a )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def a_ ( __a , __a , __a ):
if split:
A__ = {split: parquet_path}
else:
A__ = '''train'''
A__ = {'''train''': parquet_path, '''test''': parquet_path}
A__ = tmp_path / '''cache'''
A__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
A__ = ParquetDatasetReader(__a , cache_dir=__a ).read()
_check_parquet_datasetdict(__a , __a , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def a_ ( __a , __a ):
A__ = ParquetDatasetWriter(__a , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
A__ = pq.ParquetFile(tmp_path / '''foo.parquet''' )
A__ = pf.read()
assert dataset.data.table == output_table
def a_ ( __a , __a ):
A__ = str(shared_datadir / '''test_image_rgb.jpg''' )
A__ = {'''image''': [image_path]}
A__ = Features({'''image''': Image()} )
A__ = Dataset.from_dict(__a , features=__a )
A__ = ParquetDatasetWriter(__a , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
A__ = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) )
assert dataset.features == reloaded_dataset.features
A__ = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ) , streaming=__a ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''' , [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def a_ ( __a , __a ):
assert get_writer_batch_size(__a ) == expected
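# A minimal round trip of the API exercised by the tests above: write a small
# Dataset to parquet with ParquetDatasetWriter and read it back with
# ParquetDatasetReader. The directory argument below is a placeholder.
def sketch_parquet_round_trip(tmp_dir):
    ds = Dataset.from_dict(
        {"col_1": ["a", "b", "c", "d"], "col_2": [0, 1, 2, 3], "col_3": [0.0, 1.0, 2.0, 3.0]}
    )
    ParquetDatasetWriter(ds, tmp_dir + "/example.parquet").write()
    reloaded = ParquetDatasetReader(tmp_dir + "/example.parquet").read()
    assert reloaded.column_names == ["col_1", "col_2", "col_3"]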
| 571
|
"""simple docstring"""
from collections import namedtuple
__snake_case : Optional[int] = namedtuple('from_to', 'from_ to')
__snake_case : Union[str, Any] = {
'cubicmeter': from_to(1, 1),
'litre': from_to(0.001, 1_000),
'kilolitre': from_to(1, 1),
'gallon': from_to(0.00_454, 264.172),
'cubicyard': from_to(0.76_455, 1.30_795),
'cubicfoot': from_to(0.028, 35.3_147),
'cup': from_to(0.000_236_588, 4_226.75),
}
def a_ ( value , from_type , to_type ):
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f'''Invalid \'from_type\' value: {from_type!r}. Supported values are:\n'''
            + ''', '''.join(METRIC_CONVERSION ) )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
            + ''', '''.join(METRIC_CONVERSION ) )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
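# Worked example of the two-step conversion above: the `from_` factor maps the
# source unit to cubic metres and the `to` factor maps cubic metres back to the
# target unit, so a_(5, 'gallon', 'litre') == 5 * 0.00454 * 1000 ≈ 22.7 litres
# (to the precision of the table constants).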
| 571
| 1
|
'''simple docstring'''
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def a__ (__lowercase :Optional[int] ) -> Any:
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def a__ () -> Tuple:
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def a__ () -> List[Any]:
_A : Dict = '''mock-s3-bucket'''
_A : List[Any] = f"""s3://{mock_bucket}"""
_A : Tuple = extract_path_from_uri(__lowercase )
assert dataset_path.startswith('''s3://''' ) is False
_A : Tuple = '''./local/path'''
_A : int = extract_path_from_uri(__lowercase )
assert dataset_path == new_dataset_path
def a__ (__lowercase :Tuple ) -> Optional[int]:
_A : Optional[int] = is_remote_filesystem(__lowercase )
assert is_remote is True
_A : Optional[Any] = fsspec.filesystem('''file''' )
_A : List[Any] = is_remote_filesystem(__lowercase )
assert is_remote is False
@pytest.mark.parametrize('''compression_fs_class''' , __lowercase )
def a__ (__lowercase :Tuple , __lowercase :Dict , __lowercase :List[str] , __lowercase :List[Any] , __lowercase :List[str] , __lowercase :List[Any] , __lowercase :Any ) -> Any:
_A : str = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file}
_A : Tuple = input_paths[compression_fs_class.protocol]
if input_path is None:
        reason = f"""for '{compression_fs_class.protocol}' compression protocol, """
        if compression_fs_class.protocol == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason )
_A : Any = fsspec.filesystem(compression_fs_class.protocol , fo=__lowercase )
assert isinstance(__lowercase , __lowercase )
_A : Tuple = os.path.basename(__lowercase )
_A : List[Any] = expected_filename[: expected_filename.rindex('''.''' )]
assert fs.glob('''*''' ) == [expected_filename]
with fs.open(__lowercase , '''r''' , encoding='''utf-8''' ) as f, open(__lowercase , encoding='''utf-8''' ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] )
def a__ (__lowercase :Union[str, Any] , __lowercase :Optional[int] , __lowercase :Optional[int] ) -> Optional[int]:
_A : Any = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path}
_A : Tuple = compressed_file_paths[protocol]
_A : Tuple = '''dataset.jsonl'''
_A : List[Any] = f"""{protocol}://{member_file_path}::{compressed_file_path}"""
_A : Optional[int] = fsspec.get_fs_token_paths(__lowercase )
assert fs.isfile(__lowercase )
assert not fs.isfile('''non_existing_''' + member_file_path )
@pytest.mark.integration
def a__ (__lowercase :List[Any] , __lowercase :int , __lowercase :int , __lowercase :str ) -> Optional[Any]:
_A : int = hf_api.dataset_info(__lowercase , token=__lowercase )
_A : int = HfFileSystem(repo_info=__lowercase , token=__lowercase )
assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"]
assert hffs.isdir('''data''' )
assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' )
with open(__lowercase ) as f:
assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read()
def a__ () -> Optional[Any]:
_A : Any = '''bz2'''
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(__lowercase , __lowercase , clobber=__lowercase )
with pytest.warns(__lowercase ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(__lowercase ) == 1
assert (
str(warning_info[0].message )
== f"""A filesystem protocol was already set for {protocol} and will be overwritten."""
)
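# A minimal sketch of the chained-URL convention exercised by the zip/gzip test
# above: "<protocol>://<member>::<archive>" lets fsspec address a single member
# inside an archive. The archive path argument below is a placeholder.
def sketch_open_zipped_member(zip_path):
    with fsspec.open(f"zip://dataset.jsonl::{zip_path}", "rt") as f:
        return f.readline()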
| 700
|
def counting_sort(collection):
    # if the collection is empty, returns empty
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how often each number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered
def counting_sort_string(string):
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string('thisisthestring') == "eghhiiinrsssttt"
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(counting_sort(unsorted))
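# Quick sanity check of the stable O(n + k) sort above, with k = max - min + 1:
assert counting_sort([4, 1, 3, 1, 0]) == [0, 1, 1, 3, 4]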
| 332
| 0
|
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
# TODO Update this
lowerCamelCase = {
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''esm'''
def __init__( self : Union[str, Any] , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : str=None , _UpperCAmelCase : List[str]=768 , _UpperCAmelCase : Optional[int]=12 , _UpperCAmelCase : List[str]=12 , _UpperCAmelCase : str=3072 , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Any=1026 , _UpperCAmelCase : str=0.02 , _UpperCAmelCase : List[str]=1e-12 , _UpperCAmelCase : Tuple="absolute" , _UpperCAmelCase : str=True , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : List[str]=None , **_UpperCAmelCase : Dict , ) -> Tuple:
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase , mask_token_id=_UpperCAmelCase , **_UpperCAmelCase )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = position_embedding_type
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = emb_layer_norm_before
UpperCAmelCase_ = token_dropout
UpperCAmelCase_ = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("No esmfold_config supplied for folding model, using default values." )
UpperCAmelCase_ = EsmFoldConfig()
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
UpperCAmelCase_ = EsmFoldConfig(**_UpperCAmelCase )
UpperCAmelCase_ = esmfold_config
if vocab_list is None:
logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
UpperCAmelCase_ = get_default_vocab_list()
else:
UpperCAmelCase_ = vocab_list
else:
UpperCAmelCase_ = None
UpperCAmelCase_ = None
if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , _UpperCAmelCase ):
raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
def lowercase__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = super().to_dict()
if isinstance(self.esmfold_config , _UpperCAmelCase ):
UpperCAmelCase_ = self.esmfold_config.to_dict()
return output
@dataclass
class lowercase__ :
'''simple docstring'''
UpperCamelCase = None
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = 0
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = 1_28
UpperCamelCase = None
def lowercase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
if self.trunk is None:
UpperCAmelCase_ = TrunkConfig()
elif isinstance(self.trunk , _UpperCAmelCase ):
UpperCAmelCase_ = TrunkConfig(**self.trunk )
def lowercase__ ( self : Any ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = asdict(self )
UpperCAmelCase_ = self.trunk.to_dict()
return output
@dataclass
class lowercase__ :
'''simple docstring'''
UpperCamelCase = 48
UpperCamelCase = 10_24
UpperCamelCase = 1_28
UpperCamelCase = 32
UpperCamelCase = 32
UpperCamelCase = 32
UpperCamelCase = 0
UpperCamelCase = 0
UpperCamelCase = False
UpperCamelCase = 4
UpperCamelCase = 1_28
UpperCamelCase = None
def lowercase__ ( self : List[str] ) -> str:
'''simple docstring'''
if self.structure_module is None:
UpperCAmelCase_ = StructureModuleConfig()
elif isinstance(self.structure_module , _UpperCAmelCase ):
UpperCAmelCase_ = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"""`max_recycles` should be positive, got {self.max_recycles}.""" )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                F""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                F""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )
UpperCAmelCase_ = self.sequence_state_dim // self.sequence_head_width
UpperCAmelCase_ = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
F""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
F""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(F"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
def lowercase__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = asdict(self )
UpperCAmelCase_ = self.structure_module.to_dict()
return output
@dataclass
class lowercase__ :
'''simple docstring'''
UpperCamelCase = 3_84
UpperCamelCase = 1_28
UpperCamelCase = 16
UpperCamelCase = 1_28
UpperCamelCase = 12
UpperCamelCase = 4
UpperCamelCase = 8
UpperCamelCase = 0.1
UpperCamelCase = 8
UpperCamelCase = 1
UpperCamelCase = 2
UpperCamelCase = 7
UpperCamelCase = 10
UpperCamelCase = 1E-8
UpperCamelCase = 1E5
def lowercase__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
return asdict(self )
def a__ ( ):
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
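# A minimal sketch of the head-width rule enforced by the trunk config above:
# each state dimension must factor exactly into num_heads * head_width. The
# helper name is illustrative, not part of the configuration classes.
def _check_head_width(state_dim, head_width):
    num_heads = state_dim // head_width
    if state_dim != num_heads * head_width:
        raise ValueError(f"{state_dim} != {num_heads} * {head_width}")
    return num_heads


assert _check_head_width(1024, 32) == 32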
| 82
|
from functools import lru_cache
@lru_cache
def factorial ( num ) -> int:
    if num < 0:
        raise ValueError('Number should not be negative.' )
    return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
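# `lru_cache` memoises every recursion depth above, so repeated calls resolve
# in O(1) after the first; e.g. factorial(5) == 120.
assert factorial(5) == 120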
| 282
| 0
|
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ):
'''simple docstring'''
model.train()
SCREAMING_SNAKE_CASE = model(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = F.mse_loss(SCREAMING_SNAKE_CASE , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
set_seed(42 )
SCREAMING_SNAKE_CASE = RegressionModel()
SCREAMING_SNAKE_CASE = deepcopy(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = RegressionDataset(length=80 )
SCREAMING_SNAKE_CASE = DataLoader(SCREAMING_SNAKE_CASE , batch_size=16 )
model.to(accelerator.device )
if sched:
SCREAMING_SNAKE_CASE = AdamW(params=model.parameters() , lr=1E-3 )
SCREAMING_SNAKE_CASE = AdamW(params=ddp_model.parameters() , lr=1E-3 )
        SCREAMING_SNAKE_CASE = LambdaLR(SCREAMING_SNAKE_CASE , lr_lambda=lambda epoch : epoch**0.65 )
        SCREAMING_SNAKE_CASE = LambdaLR(SCREAMING_SNAKE_CASE , lr_lambda=lambda epoch : epoch**0.65 )
# Make a copy of `model`
if sched:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_training_setup(SCREAMING_SNAKE_CASE )
# Use a single batch
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = next(iter(SCREAMING_SNAKE_CASE ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.gather((ddp_input, ddp_target) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(SCREAMING_SNAKE_CASE ):
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
# Sync grads
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
SCREAMING_SNAKE_CASE = ddp_input[torch.randperm(len(SCREAMING_SNAKE_CASE ) )]
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_training_setup(SCREAMING_SNAKE_CASE )
# Use a single batch
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = next(iter(SCREAMING_SNAKE_CASE ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.gather((ddp_input, ddp_target) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(SCREAMING_SNAKE_CASE ):
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
# Sync grads
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
SCREAMING_SNAKE_CASE = ddp_input[torch.randperm(len(SCREAMING_SNAKE_CASE ) )]
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = Accelerator(
split_batches=SCREAMING_SNAKE_CASE , dispatch_batches=SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_training_setup(SCREAMING_SNAKE_CASE )
for iteration, batch in enumerate(SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = batch.values()
# Gather the distributed inputs and targs for the base model
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.gather((ddp_input, ddp_target) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(SCREAMING_SNAKE_CASE ):
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(SCREAMING_SNAKE_CASE ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
SCREAMING_SNAKE_CASE = ddp_input[torch.randperm(len(SCREAMING_SNAKE_CASE ) )]
GradientState._reset_state()
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = Accelerator(
split_batches=SCREAMING_SNAKE_CASE , dispatch_batches=SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_training_setup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for iteration, batch in enumerate(SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = batch.values()
# Gather the distributed inputs and targs for the base model
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.gather((ddp_input, ddp_target) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(SCREAMING_SNAKE_CASE )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(SCREAMING_SNAKE_CASE ):
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n"""
SCREAMING_SNAKE_CASE = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(SCREAMING_SNAKE_CASE ))
if accelerator.num_processes > 1:
check_model_parameters(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
GradientState._reset_state()
def lowerCamelCase_ ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = Accelerator()
SCREAMING_SNAKE_CASE = RegressionDataset(length=80 )
SCREAMING_SNAKE_CASE = DataLoader(SCREAMING_SNAKE_CASE , batch_size=16 )
SCREAMING_SNAKE_CASE = RegressionDataset(length=96 )
SCREAMING_SNAKE_CASE = DataLoader(SCREAMING_SNAKE_CASE , batch_size=16 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(SCREAMING_SNAKE_CASE ):
assert id(accelerator.gradient_state.active_dataloader ) == id(SCREAMING_SNAKE_CASE )
if iteration < len(SCREAMING_SNAKE_CASE ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(SCREAMING_SNAKE_CASE ):
assert id(accelerator.gradient_state.active_dataloader ) == id(SCREAMING_SNAKE_CASE )
if batch_num < len(SCREAMING_SNAKE_CASE ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def lowerCamelCase_ ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = Accelerator()
SCREAMING_SNAKE_CASE = accelerator.state
if state.local_process_index == 0:
print("""**Test `accumulate` gradient accumulation with dataloader break**""" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("""**Test NOOP `no_sync` context manager**""" )
test_noop_sync(SCREAMING_SNAKE_CASE )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("""**Test Distributed `no_sync` context manager**""" )
test_distributed_sync(SCREAMING_SNAKE_CASE )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation, """ , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation_with_opt_and_scheduler(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
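# A minimal sketch of the `accumulate` pattern the tests above verify: inside
# the context manager, gradients only synchronise and the optimizer only steps
# on accumulation boundaries. Schematic usage only, not part of the test harness;
# it assumes RegressionDataset yields {"x": ..., "y": ...} batches.
def sketch_accumulation_loop():
    accelerator = Accelerator(gradient_accumulation_steps=2)
    model = RegressionModel()
    optimizer = AdamW(model.parameters(), lr=1e-3)
    dataloader = DataLoader(RegressionDataset(length=16), batch_size=4)
    model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
    for batch in dataloader:
        with accelerator.accumulate(model):
            loss = F.mse_loss(model(batch["x"]), batch["y"])
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()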
| 718
|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase_ ( A ):
'''simple docstring'''
a__ = 42
a__ = 42
def __init__( self : Any , a : UNetaDModel , a : ScoreSdeVeScheduler ) -> str:
super().__init__()
self.register_modules(unet=a , scheduler=a )
@torch.no_grad()
def __call__( self : List[Any] , a : int = 1 , a : int = 2_000 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : Optional[str] = "pil" , a : bool = True , **a : Optional[int] , ) -> Union[ImagePipelineOutput, Tuple]:
SCREAMING_SNAKE_CASE = self.unet.config.sample_size
SCREAMING_SNAKE_CASE = (batch_size, 3, img_size, img_size)
SCREAMING_SNAKE_CASE = self.unet
SCREAMING_SNAKE_CASE = randn_tensor(a , generator=a ) * self.scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE = sample.to(self.device )
self.scheduler.set_timesteps(a )
self.scheduler.set_sigmas(a )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
SCREAMING_SNAKE_CASE = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
SCREAMING_SNAKE_CASE = self.unet(a , a ).sample
SCREAMING_SNAKE_CASE = self.scheduler.step_correct(a , a , generator=a ).prev_sample
# prediction step
SCREAMING_SNAKE_CASE = model(a , a ).sample
SCREAMING_SNAKE_CASE = self.scheduler.step_pred(a , a , a , generator=a )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = output.prev_sample, output.prev_sample_mean
SCREAMING_SNAKE_CASE = sample_mean.clamp(0 , 1 )
SCREAMING_SNAKE_CASE = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE = self.numpy_to_pil(a )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=a )
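# Schematic of the predictor-corrector loop above: each timestep first runs
# `correct_steps` Langevin corrector updates (step_correct) on the current
# sample, then a single reverse-SDE predictor update (step_pred). In pseudocode:
#
#   for t in scheduler.timesteps:
#       for _ in range(scheduler.config.correct_steps):
#           sample = scheduler.step_correct(score(sample, t), sample).prev_sample
#       sample = scheduler.step_pred(score(sample, t), t, sample).prev_sample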
| 450
| 0
|
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
snake_case : Optional[int] = logging.getLogger(__name__)
class __lowercase ( UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = "sequence-classification"
def __init__( self , A_ )-> Optional[int]:
if type(A_ ) == dict:
_SCREAMING_SNAKE_CASE = Namespace(**A_ )
_SCREAMING_SNAKE_CASE = glue_output_modes[hparams.task]
_SCREAMING_SNAKE_CASE = glue_tasks_num_labels[hparams.task]
super().__init__(A_ , A_ , self.mode )
def __magic_name__ ( self , **A_ )-> str:
return self.model(**A_ )
def __magic_name__ ( self , A_ , A_ )-> Tuple:
_SCREAMING_SNAKE_CASE = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_SCREAMING_SNAKE_CASE = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_SCREAMING_SNAKE_CASE = self(**A_ )
_SCREAMING_SNAKE_CASE = outputs[0]
_SCREAMING_SNAKE_CASE = self.trainer.lr_schedulers[0]['scheduler']
_SCREAMING_SNAKE_CASE = {'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def __magic_name__ ( self )-> Optional[int]:
_SCREAMING_SNAKE_CASE = self.hparams
_SCREAMING_SNAKE_CASE = processors[args.task]()
_SCREAMING_SNAKE_CASE = processor.get_labels()
for mode in ["train", "dev"]:
_SCREAMING_SNAKE_CASE = self._feature_file(A_ )
if os.path.exists(A_ ) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' , A_ )
else:
logger.info('Creating features from dataset file at %s' , args.data_dir )
_SCREAMING_SNAKE_CASE = (
processor.get_dev_examples(args.data_dir )
if mode == 'dev'
else processor.get_train_examples(args.data_dir )
)
_SCREAMING_SNAKE_CASE = convert_examples_to_features(
A_ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('Saving features into cached file %s' , A_ )
torch.save(A_ , A_ )
def __magic_name__ ( self , A_ , A_ , A_ = False )-> DataLoader:
_SCREAMING_SNAKE_CASE = 'dev' if mode == 'test' else mode
_SCREAMING_SNAKE_CASE = self._feature_file(A_ )
logger.info('Loading features from cached file %s' , A_ )
_SCREAMING_SNAKE_CASE = torch.load(A_ )
_SCREAMING_SNAKE_CASE = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
_SCREAMING_SNAKE_CASE = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
_SCREAMING_SNAKE_CASE = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_SCREAMING_SNAKE_CASE = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_SCREAMING_SNAKE_CASE = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(A_ , A_ , A_ , A_ ) , batch_size=A_ , shuffle=A_ , )
def __magic_name__ ( self , A_ , A_ )-> List[Any]:
_SCREAMING_SNAKE_CASE = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_SCREAMING_SNAKE_CASE = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_SCREAMING_SNAKE_CASE = self(**A_ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = outputs[:2]
_SCREAMING_SNAKE_CASE = logits.detach().cpu().numpy()
_SCREAMING_SNAKE_CASE = inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __magic_name__ ( self , A_ )-> tuple:
_SCREAMING_SNAKE_CASE = torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item()
_SCREAMING_SNAKE_CASE = np.concatenate([x['pred'] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
_SCREAMING_SNAKE_CASE = np.argmax(A_ , axis=1 )
elif self.hparams.glue_output_mode == "regression":
_SCREAMING_SNAKE_CASE = np.squeeze(A_ )
_SCREAMING_SNAKE_CASE = np.concatenate([x['target'] for x in outputs] , axis=0 )
_SCREAMING_SNAKE_CASE = [[] for _ in range(out_label_ids.shape[0] )]
_SCREAMING_SNAKE_CASE = [[] for _ in range(out_label_ids.shape[0] )]
_SCREAMING_SNAKE_CASE = {**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task , A_ , A_ )}
_SCREAMING_SNAKE_CASE = dict(results.items() )
_SCREAMING_SNAKE_CASE = results
return ret, preds_list, out_label_list
def __magic_name__ ( self , A_ )-> dict:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self._eval_end(A_ )
_SCREAMING_SNAKE_CASE = ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __magic_name__ ( self , A_ )-> dict:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self._eval_end(A_ )
_SCREAMING_SNAKE_CASE = ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def __magic_name__ ( A_ , A_ )-> Union[str, Any]:
BaseTransformer.add_model_specific_args(A_ , A_ )
parser.add_argument(
'--max_seq_length' , default=128 , type=A_ , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--task' , default='' , type=A_ , required=A_ , help='The GLUE task to run' , )
parser.add_argument(
'--gpus' , default=0 , type=A_ , help='The number of GPUs allocated for this, it is by default 0 meaning none' , )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
return parser
def SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
add_generic_args(UpperCAmelCase__ ,os.getcwd() )
_SCREAMING_SNAKE_CASE = GLUETransformer.add_model_specific_args(UpperCAmelCase__ ,os.getcwd() )
_SCREAMING_SNAKE_CASE = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
_SCREAMING_SNAKE_CASE = os.path.join(
'./results' ,f'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' ,)
os.makedirs(args.output_dir )
_SCREAMING_SNAKE_CASE = GLUETransformer(UpperCAmelCase__ )
_SCREAMING_SNAKE_CASE = generic_train(UpperCAmelCase__ ,UpperCAmelCase__ )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
_SCREAMING_SNAKE_CASE = sorted(glob.glob(os.path.join(args.output_dir ,'checkpoint-epoch=*.ckpt' ) ,recursive=UpperCAmelCase__ ) )
_SCREAMING_SNAKE_CASE = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(UpperCAmelCase__ )
if __name__ == "__main__":
main()
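# Typical invocation of the entry point above (the script name and flag values
# are examples only; the flags themselves come from add_generic_args and
# add_model_specific_args):
#
#   python run_pl_glue.py --task mrpc --data_dir ./glue_data/MRPC \
#       --model_name_or_path bert-base-cased --max_seq_length 128 \
#       --gpus 1 --do_predict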
| 605
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class __lowercase ( UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 605
| 1
|
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 189
|
import math
def check_partition_perfect ( positive_integer : int ) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
    return exponent == int(exponent )
def solution ( max_proportion : float = 1 / 12345 ) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate ):
            partition_candidate = int(partition_candidate )
            total_partitions += 1
            if check_partition_perfect(partition_candidate ):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate )
        integer += 1
if __name__ == "__main__":
print(F"""{solution() = }""")
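# Worked example for check_partition_perfect above: 4 * 2 + 1 = 9, and
# sqrt(9) / 2 + 1 / 2 = 2, whose log2 is the integer 1, so n = 2 passes,
# while n = 3 (log2(sqrt(13) / 2 + 1 / 2) ≈ 1.2) does not.
assert check_partition_perfect(2) and not check_partition_perfect(3)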
| 189
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
snake_case : int = {
"configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : int = [
"MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegaForCausalLM",
"MegaForMaskedLM",
"MegaForMultipleChoice",
"MegaForQuestionAnswering",
"MegaForSequenceClassification",
"MegaForTokenClassification",
"MegaModel",
"MegaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
snake_case : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 124
|
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)
def lowerCAmelCase_ ( image ) -> Union[str, Any]:
    '''simple docstring'''
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    image = [trans(img.convert("RGB" ) ) for img in image]
    image = torch.stack(image )
    return image
class _snake_case ( snake_case ):
def __init__( self , _a , _a ):
super().__init__()
# make sure scheduler can always be converted to DDIM
__magic_name__ : Optional[int] = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=_a , scheduler=_a )
def SCREAMING_SNAKE_CASE ( self , _a ):
if strength < 0 or strength > 1:
            raise ValueError(f'''The value of strength should be in [0.0, 1.0] but is {strength}''' )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a ):
# get the original timestep using init_timestep
__magic_name__ : List[str] = min(int(num_inference_steps * strength ) , _a )
__magic_name__ : Dict = max(num_inference_steps - init_timestep , 0 )
__magic_name__ : Union[str, Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , _a , _a=None ):
if not isinstance(_a , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_a )}''' )
__magic_name__ : Any = image.to(device=_a , dtype=_a )
if isinstance(_a , _a ) and len(_a ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(_a )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
__magic_name__ : Optional[int] = init_latents.shape
__magic_name__ : Optional[Any] = randn_tensor(_a , generator=_a , device=_a , dtype=_a )
# get latents
print("add noise to latents at timestep" , _a )
__magic_name__ : List[Any] = self.scheduler.add_noise(_a , _a , _a )
__magic_name__ : int = init_latents
return latents
@torch.no_grad()
def __call__( self , _a = None , _a = 0.8 , _a = 1 , _a = None , _a = 0.0 , _a = 50 , _a = None , _a = "pil" , _a = True , ):
self.check_inputs(_a )
# 2. Preprocess image
__magic_name__ : str = preprocess(_a )
# 3. set timesteps
self.scheduler.set_timesteps(_a , device=self.device )
__magic_name__ , __magic_name__ : str = self.get_timesteps(_a , _a , self.device )
__magic_name__ : List[str] = timesteps[:1].repeat(_a )
# 4. Prepare latent variables
__magic_name__ : Optional[int] = self.prepare_latents(_a , _a , _a , self.unet.dtype , self.device , _a )
__magic_name__ : List[Any] = latents
# 5. Denoising loop
for t in self.progress_bar(_a ):
# 1. predict noise model_output
__magic_name__ : str = self.unet(_a , _a ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
__magic_name__ : str = self.scheduler.step(
_a , _a , _a , eta=_a , use_clipped_model_output=_a , generator=_a , ).prev_sample
__magic_name__ : Optional[Any] = (image / 2 + 0.5).clamp(0 , 1 )
__magic_name__ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__magic_name__ : Optional[int] = self.numpy_to_pil(_a )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=_a )
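# A minimal sketch of the strength-to-timesteps mapping in get_timesteps above:
# with 50 inference steps and strength 0.8, noise is added at timestep index 10
# and denoising runs over the remaining 40 scheduler timesteps.
def sketch_timestep_start(num_inference_steps, strength):
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    return max(num_inference_steps - init_timestep, 0)


assert sketch_timestep_start(50, 0.8) == 10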
| 124
| 1
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
A_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
A_ = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case : Tuple = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir, """schedulers/""" ) )
_snake_case : Tuple = self.diffusers_dir
shutil.copy(
os.path.join(_UpperCamelCase, """src/diffusers/schedulers/scheduling_ddpm.py""" ), os.path.join(self.diffusers_dir, """schedulers/scheduling_ddpm.py""" ), )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : str = """src/diffusers"""
shutil.rmtree(self.diffusers_dir )
def UpperCamelCase_ ( self: Dict, a_: Optional[Any], a_: int, a_: Dict, a_: Union[str, Any]=None ):
'''simple docstring'''
_snake_case : Dict = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
if overwrite_result is not None:
_snake_case : int = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
_snake_case : Any = black.Mode(target_versions={black.TargetVersion.PYaa}, line_length=119 )
_snake_case : Optional[int] = black.format_str(_UpperCamelCase, mode=_UpperCamelCase )
_snake_case : Optional[Any] = os.path.join(self.diffusers_dir, """new_code.py""" )
with open(_UpperCamelCase, """w""", newline="""\n""" ) as f:
f.write(_UpperCamelCase )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_UpperCamelCase ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name, overwrite=_UpperCamelCase )
with open(_UpperCamelCase, """r""" ) as f:
self.assertTrue(f.read(), _UpperCamelCase )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : int = check_copies.find_code_in_diffusers("""schedulers.scheduling_ddpm.DDPMSchedulerOutput""" )
self.assertEqual(_UpperCamelCase, _UpperCamelCase )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""", """DDPMSchedulerOutput""", REFERENCE_CODE + """\n""", )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""", """DDPMSchedulerOutput""", _UpperCamelCase, )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""", """TestSchedulerOutput""", re.sub("""DDPM""", """Test""", _UpperCamelCase ), )
# Copy consistency with a really long name
_snake_case : Union[str, Any] = """TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}", f"{long_class_name}SchedulerOutput", re.sub("""Bert""", _UpperCamelCase, _UpperCamelCase ), )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""", """TestSchedulerOutput""", _UpperCamelCase, overwrite_result=re.sub("""DDPM""", """Test""", _UpperCamelCase ), )
| 715
|
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Dict ):
"""simple docstring"""
_snake_case : str = tmp_path / """cache"""
_snake_case : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_snake_case : str = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : str , snake_case__ : str ):
"""simple docstring"""
if issubclass(snake_case__ , snake_case__ ):
_snake_case : Optional[Any] = parquet_path
elif issubclass(snake_case__ , snake_case__ ):
_snake_case : int = [parquet_path]
_snake_case : Union[str, Any] = tmp_path / """cache"""
_snake_case : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : List[str] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : Tuple = tmp_path / """cache"""
_snake_case : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_snake_case : Tuple = ParquetDatasetReader(
{"""train""": parquet_path} , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""" , [
(Features({"""foo""": Value("""int32""" )} ), None),
(Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
| 28
| 0
|
'''simple docstring'''
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] from x[n]."""
        return 0.0
def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    """Get bounds for printing fft results."""
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest
def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Show frequency response of the filter."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()
def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Show phase response of the filter."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
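A minimal usage sketch for the two plotting helpers above. `SimpleLowPass` is a hypothetical one-pole IIR filter written only to satisfy the `FilterType` protocol; any object with a matching `process` method works.

class SimpleLowPass:
    """Hypothetical one-pole IIR low-pass: y[n] = a * x[n] + (1 - a) * y[n-1]."""

    def __init__(self, alpha: float = 0.1) -> None:
        self.alpha = alpha
        self.prev_output = 0.0

    def process(self, sample: float) -> float:
        self.prev_output = self.alpha * sample + (1 - self.alpha) * self.prev_output
        return self.prev_output


if __name__ == "__main__":
    # Use a fresh filter per plot, since `process` is stateful.
    show_frequency_response(SimpleLowPass(alpha=0.1), samplerate=48_000)
    show_phase_response(SimpleLowPass(alpha=0.1), samplerate=48_000)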
| 150
|
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""snap-research/efficientformer-l1-300""": (
"""https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
),
}
class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
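A brief usage sketch, assuming only the public `transformers` API shown above; the printed values are simply the class defaults.

from transformers import EfficientFormerConfig, EfficientFormerModel

# Default EfficientFormer-L1-style configuration, with one field overridden.
config = EfficientFormerConfig(drop_path_rate=0.1)

# Initialize a model with random weights from this configuration.
model = EfficientFormerModel(config)
print(model.config.hidden_sizes)  # [48, 96, 224, 448]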
| 150
| 1
|
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 503
|
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")
class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
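A short usage sketch of the class above; the expected output in the comments follows directly from the insertion logic.

if __name__ == "__main__":
    # Directed graph: edges only go source -> destination.
    d_graph = GraphAdjacencyList()
    d_graph.add_edge(0, 1).add_edge(1, 2).add_edge(1, 3)
    print(d_graph)  # {0: [1], 1: [2, 3], 2: [], 3: []}

    # Undirected graph: each edge is mirrored on both endpoints.
    u_graph = GraphAdjacencyList(directed=False)
    u_graph.add_edge("a", "b").add_edge("b", "c")
    print(u_graph)  # {'a': ['b'], 'b': ['a', 'c'], 'c': ['b']}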
| 503
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ViT MSN does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 433
|
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )
@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 433
| 1
|
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
        T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 335
|
from __future__ import annotations

import csv

import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    base_url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(base_url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
| 335
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'PLBartForCausalLM',
'PLBartForConditionalGeneration',
'PLBartForSequenceClassification',
'PLBartModel',
'PLBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
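The block above defers heavy imports until first attribute access: `sys.modules[__name__]` is replaced with a lazy proxy, so `from ... import PLBartConfig` only triggers the config submodule import. A rough standalone sketch of the same idea follows; this `_TinyLazyModule` is a simplified stand-in, not the actual `_LazyModule` implementation.

import importlib
import types


class _TinyLazyModule(types.ModuleType):
    """Simplified stand-in that imports a submodule only on first attribute access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure
        # Map each exported symbol to the submodule that defines it.
        self._symbol_to_module = {
            symbol: submodule for submodule, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, symbol):
        if symbol not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {symbol!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._symbol_to_module[symbol]}")
        value = getattr(submodule, symbol)
        setattr(self, symbol, value)  # cache for subsequent lookups
        return value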
| 263
|
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 263
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()

    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
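For reference, a sketch of how this conversion script would be invoked; the script filename, model name, and paths below are illustrative placeholders, not tested values.

# Example invocation:
#
#   python convert_wav2vec2_s3prl_checkpoint.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./classifier_config.json \
#       --checkpoint_path ./s3prl_downstream.ckpt \
#       --model_dump_path ./converted_model
#
# The equivalent programmatic call, e.g. from a notebook:
#
#   convert_s3prl_checkpoint(
#       base_model_name="facebook/wav2vec2-base",
#       config_path="./classifier_config.json",
#       checkpoint_path="./s3prl_downstream.ckpt",
#       model_dump_path="./converted_model",
#   )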
| 720
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 65
| 0
|
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration


REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper
def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper
def skip_on_windows(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper
def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished


@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:load_metric is deprecated:FutureWarning""" )
def snake_case_ ( self , __A ):
__a = """[...]"""
__a = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("""metrics""" , __A ) ).module_path )
__a = datasets.load.import_main_class(metric_module.__name__ , dataset=__A )
# check parameters
__a = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(__A , metric_module.__name__ ):
with self.use_local_metrics():
try:
__a = doctest.testmod(__A , verbose=__A , raise_on_error=__A )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def snake_case_ ( self , __A ):
__a = """[...]"""
__a = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("""metrics""" , __A ) ).module_path )
# run doctest
with self.use_local_metrics():
__a = doctest.testmod(__A , verbose=__A , raise_on_error=__A )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def snake_case_ ( self , __A , __A ):
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](__A ):
yield
else:
yield
@contextmanager
def snake_case_ ( self ):
def load_local_metric(__A , *__A , **__A ):
return load_metric(os.path.join("""metrics""" , __A ) , *__A , **__A )
with patch("""datasets.load_metric""" ) as mock_load_metric:
__a = load_local_metric
yield
@classmethod
def snake_case_ ( cls , __A ):
def wrapper(__A ):
__a = contextmanager(__A )
__a = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher("""bleurt""" )
def a (lowerCAmelCase__ ):
    import tensorflow.compat.v1 as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("""sv""" , """""" , """""" ) # handle pytest cli flags
class __UpperCAmelCase ( __A ):
"""simple docstring"""
def snake_case_ ( self , __A ):
assert len(input_dict["""input_ids"""] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("""bleurt.score._create_predictor""" ) as mock_create_predictor:
__a = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("""bertscore""" )
def a (lowerCAmelCase__ ):
import torch
def bert_cos_score_idf(lowerCAmelCase__ , lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__ ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(lowerCAmelCase__ ) )
# mock get_model which is supposed to do download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch("""bert_score.scorer.get_model""" ), patch(
"""bert_score.scorer.bert_cos_score_idf""" ) as mock_bert_cos_score_idf:
__a = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("""comet""" )
def a (lowerCAmelCase__ ):
def load_from_checkpoint(lowerCAmelCase__ ):
class __UpperCAmelCase :
"""simple docstring"""
def snake_case_ ( self , __A , *__A , **__A ):
assert len(__A ) == 2
__a = [0.19, 0.92]
return scores, sum(__A ) / len(__A )
return Model()
# mock load_from_checkpoint which is supposed to do download a bert model
# mock load_from_checkpoint which is supposed to do download a bert model
with patch("""comet.download_model""" ) as mock_download_model:
__a = None
with patch("""comet.load_from_checkpoint""" ) as mock_load_from_checkpoint:
__a = load_from_checkpoint
yield
def a ():
__a = load_metric(os.path.join("""metrics""" , """seqeval""" ) )
__a = """ERROR"""
__a = f'''Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}'''
with pytest.raises(lowerCAmelCase__ , match=re.escape(lowerCAmelCase__ ) ):
metric.compute(predictions=[] , references=[] , scheme=lowerCAmelCase__ )
| 99
|
'''simple docstring'''
def multiplication_table(number: int, number_of_terms: int) -> str:
    """
    Prints the multiplication table of a given number till the given number of terms.
    """
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 436
| 0
|
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import (
        AutoProcessor,
        BertTokenizerFast,
        BlipImageProcessor,
        GPT2Tokenizer,
        InstructBlipProcessor,
        PreTrainedTokenizerFast,
    )
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)

        processor.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
lowercase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowercase = self.get_image_processor(do_normalize=_lowerCAmelCase , padding_value=1.0 )
lowercase = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCAmelCase )
self.assertIsInstance(processor.qformer_tokenizer , _lowerCAmelCase )
def _a ( self ) -> int:
'''simple docstring'''
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = self.get_qformer_tokenizer()
lowercase = InstructBlipProcessor(
tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase , qformer_tokenizer=_lowerCAmelCase )
lowercase = self.prepare_image_inputs()
lowercase = image_processor(_lowerCAmelCase , return_tensors="""np""" )
lowercase = processor(images=_lowerCAmelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = self.get_qformer_tokenizer()
lowercase = InstructBlipProcessor(
tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase , qformer_tokenizer=_lowerCAmelCase )
lowercase = """lower newer"""
lowercase = processor(text=_lowerCAmelCase )
lowercase = tokenizer(_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase )
lowercase = qformer_tokenizer(_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = self.get_qformer_tokenizer()
lowercase = InstructBlipProcessor(
tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase , qformer_tokenizer=_lowerCAmelCase )
lowercase = """lower newer"""
lowercase = self.prepare_image_inputs()
lowercase = processor(text=_lowerCAmelCase , images=_lowerCAmelCase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
# test if it raises when no input is passed
with pytest.raises(_lowerCAmelCase ):
processor()
def _a ( self ) -> Optional[int]:
'''simple docstring'''
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = self.get_qformer_tokenizer()
lowercase = InstructBlipProcessor(
tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase , qformer_tokenizer=_lowerCAmelCase )
lowercase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase = processor.batch_decode(_lowerCAmelCase )
lowercase = tokenizer.batch_decode(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = self.get_qformer_tokenizer()
lowercase = InstructBlipProcessor(
tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase , qformer_tokenizer=_lowerCAmelCase )
lowercase = """lower newer"""
lowercase = self.prepare_image_inputs()
lowercase = processor(text=_lowerCAmelCase , images=_lowerCAmelCase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
| 707
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
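A brief usage sketch mirroring the standard config/model pattern; it uses only the defaults above, so no checkpoint download is needed.

from transformers import ViTMSNConfig, ViTMSNModel

# Initializing a ViT MSN vit-msn-base style configuration
configuration = ViTMSNConfig()

# Initializing a model (with random weights) from that configuration
model = ViTMSNModel(configuration)

# Accessing the model configuration
configuration = model.config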
| 653
| 0
|
import os
import pytest
from attr import dataclass
snake_case : Any = """us-east-1""" # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}
@property
def SCREAMING_SNAKE_CASE ( self ):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
    def base_job_name( self ):
        return f'''{self.framework}-transformers-test'''
@property
    def test_path( self ):
return f'''./tests/sagemaker/scripts/{self.framework}'''
@property
    def image_uri( self ):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class" )
def lowerCAmelCase_ ( request ) -> None:
    '''simple docstring'''
    # Attach the environment to the test class so tests can reach it as `self.env`.
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework )
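# Minimal usage sketch (editor addition, not in the original file). A test class
# opts into the fixture above by its (obfuscated) name; the test body is a
# hypothetical example, not an existing test.
#
#     @pytest.mark.usefixtures("lowerCAmelCase_")
#     class TestPyTorchSageMaker:
#         framework = "pytorch"
#
#         def test_image_uri(self):
#             assert "huggingface-pytorch-training" in self.env.image_uri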
| 124
|
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class lowerCamelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = CpmAntTokenizer
SCREAMING_SNAKE_CASE_ = False
    def setUp( self ):
'''simple docstring'''
super().setUp()
        vocab_tokens = [
'''<d>''',
'''</d>''',
'''<s>''',
'''</s>''',
'''</_>''',
'''<unk>''',
'''<pad>''',
'''</n>''',
'''我''',
'''是''',
'''C''',
'''P''',
'''M''',
'''A''',
'''n''',
'''t''',
]
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
@tooslow
    def test_pre_tokenization( self ):
'''simple docstring'''
        tokenizer = CpmAntTokenizer.from_pretrained('''openbmb/cpm-ant-10b''' )
        texts = '''今天天气真好!'''
        jieba_tokens = ['''今天''', '''天气''', '''真''', '''好''', '''!''']
        tokens = tokenizer.tokenize(texts )
        self.assertListEqual(tokens ,jieba_tokens )
        normalized_text = '''今天天气真好!'''
        input_tokens = [tokenizer.bos_token] + tokens
        input_ids = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) ,input_ids )
        reconstructed_text = tokenizer.decode(input_ids )
        self.assertEqual(reconstructed_text ,normalized_text )
| 387
| 0
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
lowercase_ = {
'''Salesforce/blip-vqa-base''': '''https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json''',
    '''Salesforce/blip-vqa-capfilt-large''': (
        '''https://huggingface.co/Salesforce/blip-vqa-capfilt-large/resolve/main/config.json'''
),
'''Salesforce/blip-image-captioning-base''': (
'''https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'''
),
'''Salesforce/blip-image-captioning-large''': (
'''https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'''
),
'''Salesforce/blip-itm-base-coco''': '''https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json''',
'''Salesforce/blip-itm-large-coco''': '''https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json''',
'''Salesforce/blip-itm-base-flikr''': '''https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json''',
'''Salesforce/blip-itm-large-flikr''': (
'''https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'''
),
}
class BlipTextConfig( PretrainedConfig ):
    model_type = "blip_text_model"
    def __init__( self , vocab_size=30524 , hidden_size=768 , encoder_hidden_size=768 , intermediate_size=3072 , projection_dim=768 , num_hidden_layers=12 , num_attention_heads=8 , max_position_embeddings=512 , hidden_act="gelu" , layer_norm_eps=1e-12 , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , bos_token_id=30522 , sep_token_id=2 , pad_token_id=0 , eos_token_id=102 , is_decoder=True , use_cache=True , **kwargs , )-> None:
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , sep_token_id=sep_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs )-> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from BlipConfig
        if config_dict.get("""model_type""") == "blip":
            config_dict = config_dict["""text_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
        return cls.from_dict(config_dict , **kwargs )
class BlipVisionConfig( PretrainedConfig ):
    model_type = "blip_vision_model"
    def __init__( self , hidden_size=768 , intermediate_size=3072 , projection_dim=512 , num_hidden_layers=12 , num_attention_heads=12 , image_size=384 , patch_size=16 , hidden_act="gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=1e-10 , **kwargs , )-> None:
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs )-> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get("""model_type""") == "blip":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
        return cls.from_dict(config_dict , **kwargs )
class BlipConfig( PretrainedConfig ):
    model_type = "blip"
    is_composition = True
    def __init__( self , text_config=None , vision_config=None , projection_dim=512 , logit_scale_init_value=2.6592 , image_text_hidden_size=256 , **kwargs , )-> None:
        super().__init__(**kwargs)
        if text_config is None:
            text_config = {}
            logger.info("""`text_config` is `None`. Initializing the `BlipTextConfig` with default values.""")
        if vision_config is None:
            vision_config = {}
            logger.info("""`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.""")
        self.text_config = BlipTextConfig(**text_config)
        self.vision_config = BlipVisionConfig(**vision_config)
        self.text_config.encoder_hidden_size = self.vision_config.hidden_size
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size
@classmethod
    def from_text_vision_configs( cls , text_config : BlipTextConfig , vision_config : BlipVisionConfig , **kwargs )-> "BlipConfig":
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs)
    def to_dict( self )-> dict:
        output = copy.deepcopy(self.__dict__)
        output["""text_config"""] = self.text_config.to_dict()
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
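# Usage sketch (editor addition, not part of the original file): composing a
# BlipConfig from its sub-configs; all values are defaults, nothing is downloaded.
#
#     text_config = BlipTextConfig()
#     vision_config = BlipVisionConfig()
#     config = BlipConfig.from_text_vision_configs(text_config, vision_config)
#     assert config.text_config.encoder_hidden_size == vision_config.hidden_size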
| 456
|
import os
from typing import Dict, List, Tuple, TypeVar, Union
lowercase_ = TypeVar('''T''')
lowercase_ = Union[List[T], Tuple[T, ...]]
lowercase_ = Union[T, List[T], Dict[str, T]]
lowercase_ = Union[str, bytes, os.PathLike]
| 456
| 1
|
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / 'model_card_template.md'
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def http_user_agent( user_agent : Union[Dict, str, None] = None ):
    ua = f"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"""
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"""; torch/{_torch_version}"""
    if is_flax_available():
        ua += f"""; jax/{_jax_version}"""
        ua += f"""; flax/{_flax_version}"""
    if is_onnx_available():
        ua += f"""; onnxruntime/{_onnxruntime_version}"""
    # CI will set this value to True
    if os.environ.get('DIFFUSERS_IS_CI' , '' ).upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent , dict ):
        ua += "; " + "; ".join(f"""{k}/{v}""" for k, v in user_agent.items() )
    elif isinstance(user_agent , str ):
        ua += "; " + user_agent
    return ua
def get_full_repo_name( model_id : str , organization : Optional[str] = None , token : Optional[str] = None ):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token )['name']
        return f"""{username}/{model_id}"""
    else:
        return f"""{organization}/{model_id}"""
def create_model_card( args , model_name ):
    if not is_jinja_available():
        raise ValueError(
            'Modelcard rendering is based on Jinja templates.'
            ' Please make sure to have `jinja` installed before using `create_model_card`.'
            ' To install it, please run `pip install Jinja2`.' )
    if hasattr(args , 'local_rank' ) and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args , 'hub_token' ) else None
    repo_name = get_full_repo_name(model_name , token=hub_token )
    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language='en' ,
            license='apache-2.0' ,
            library_name='diffusers' ,
            tags=[] ,
            datasets=args.dataset_name ,
            metrics=[] ,
        ) ,
        template_path=MODEL_CARD_TEMPLATE_PATH ,
        model_name=model_name ,
        repo_name=repo_name ,
        dataset_name=args.dataset_name if hasattr(args , 'dataset_name' ) else None ,
        learning_rate=args.learning_rate ,
        train_batch_size=args.train_batch_size ,
        eval_batch_size=args.eval_batch_size ,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args , 'gradient_accumulation_steps' ) else None
        ) ,
        adam_beta1=args.adam_beta1 if hasattr(args , 'adam_beta1' ) else None ,
        adam_beta2=args.adam_beta2 if hasattr(args , 'adam_beta2' ) else None ,
        adam_weight_decay=args.adam_weight_decay if hasattr(args , 'adam_weight_decay' ) else None ,
        adam_epsilon=args.adam_epsilon if hasattr(args , 'adam_epsilon' ) else None ,
        lr_scheduler=args.lr_scheduler if hasattr(args , 'lr_scheduler' ) else None ,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args , 'lr_warmup_steps' ) else None ,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args , 'ema_inv_gamma' ) else None ,
        ema_power=args.ema_power if hasattr(args , 'ema_power' ) else None ,
        ema_max_decay=args.ema_max_decay if hasattr(args , 'ema_max_decay' ) else None ,
        mixed_precision=args.mixed_precision ,
    )
    card_path = os.path.join(args.output_dir , 'README.md' )
    model_card.save(card_path )
def extract_commit_hash( resolved_file : Optional[str] , commit_hash : Optional[str] = None ):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file ).as_posix() )
    search = re.search(r'snapshots/([^/]+)/' , resolved_file )
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash ) else None
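# Quick illustration (editor addition; the path below is hypothetical):
#     extract_commit_hash("/cache/models--a--b/snapshots/" + "0" * 40 + "/unet/config.json")
# returns the 40-hex segment after `snapshots/` ("0" * 40), and None otherwise.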
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
old_diffusers_cache = os.path.join(hf_cache_home, 'diffusers')
def move_cache( old_cache_dir : Optional[str] = None , new_cache_dir : Optional[str] = None ):
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir ).expanduser()
    new_cache_dir = Path(new_cache_dir ).expanduser()
    for old_blob_path in old_cache_dir.glob('**/blobs/*' ):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir )
            new_blob_path.parent.mkdir(parents=True , exist_ok=True )
            os.replace(old_blob_path , new_blob_path )
            try:
                os.symlink(new_blob_path , old_blob_path )
            except OSError:
                logger.warning(
                    'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
            trace = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
'the directory exists and can be written to.'
)
def _add_variant( weights_name : str , variant : Optional[str] = None ):
    if variant is not None:
        splits = weights_name.split('.' )
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = '.'.join(splits )
    return weights_name
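# Illustration (editor addition): the variant is spliced in before the extension,
#     _add_variant("diffusion_pytorch_model.bin", "fp16")
#     -> "diffusion_pytorch_model.fp16.bin"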
def _get_model_file( pretrained_model_name_or_path , *,
    weights_name , subfolder , cache_dir , force_download , proxies , resume_download , local_files_only , use_auth_token , user_agent , revision , commit_hash=None , ):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path )
    if os.path.isfile(pretrained_model_name_or_path ):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path ):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path , weights_name ) ):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path , weights_name )
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path , subfolder , weights_name ) ):
            model_file = os.path.join(pretrained_model_name_or_path , subfolder , weights_name )
            return model_file
        else:
            raise EnvironmentError(
                f"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""" )
else:
# 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__ ).base_version ) >= version.parse('0.20.0' )
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path , filename=_add_variant(weights_name , revision ) , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
                warnings.warn(
                    f"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""" , FutureWarning , )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name , revision )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name , revision )}' so that the correct variant file can be added.""" , FutureWarning , )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path , filename=weights_name , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
            return model_file
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '
'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '
'login`.' )
except RevisionNotFoundError:
raise EnvironmentError(
f"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
'this model name. Check the model page at '
f"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" )
except EntryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" )
except HTTPError as err:
raise EnvironmentError(
f"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" )
except ValueError:
raise EnvironmentError(
f"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
f""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
f""" directory containing a file named {weights_name} or"""
' \nCheckout your internet connection or see how to run the library in'
' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.' )
except EnvironmentError:
raise EnvironmentError(
f"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
'\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '
f"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
f"""containing a file named {weights_name}""" )
| 20
|
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__( self , parent , out_indices=None , stage_names=None , out_features=None , backbone="resnet50" , batch_size=3 , image_size=32 , num_channels=3 , use_pretrained_backbone=True , is_training=True , ) -> None:
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values
    def get_config( self ):
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
    def create_and_check_model( self , config , pixel_values ):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin , BackboneTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
    def setUp( self ) -> None:
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self , config_class=TimmBackboneConfig , has_text_modality=False)
def __UpperCamelCase ( self) -> Dict:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self) -> str:
        timm_checkpoint = 'resnet18'
        transformers_checkpoint = 'microsoft/resnet-18'
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint , use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(len(timm_model.stage_names) , len(transformers_model.stage_names))
self.assertEqual(timm_model.channels , transformers_model.channels)
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,))
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names) - 1])
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint , use_timm_backbone=True , out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint , out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices , transformers_model.out_indices)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(timm_model.channels , transformers_model.channels)
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking')
def __UpperCamelCase ( self) -> int:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side')
def __UpperCamelCase ( self) -> Any:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def __UpperCamelCase ( self) -> Any:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint')
def __UpperCamelCase ( self) -> Optional[int]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def __UpperCamelCase ( self) -> Dict:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def __UpperCamelCase ( self) -> List[Any]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.')
def __UpperCamelCase ( self) -> int:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.')
def __UpperCamelCase ( self) -> str:
pass
@unittest.skip('Safetensors is not supported by timm.')
def __UpperCamelCase ( self) -> Optional[int]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __UpperCamelCase ( self) -> Optional[Any]:
pass
    def test_forward_signature( self ) -> None:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names)
    def test_retain_grad_hidden_states_attentions( self ) -> None:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)
        inputs = self._prepare_for_class(inputs_dict , model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]
        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True)
        self.assertIsNotNone(hidden_states.grad)
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_backbone_outputs( self ) -> None:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
            self.assertEqual(len(result.feature_maps) , len(config.out_indices))
            self.assertEqual(len(model.channels) , len(config.out_indices))
            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
            self.assertEqual(len(result.feature_maps) , 1)
            self.assertEqual(len(model.channels) , 1)
            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
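# Usage sketch (editor addition): loading a timm checkpoint as a backbone, as
# the tests above do; requires `timm` to be installed.
#
#     from transformers import AutoBackbone
#     backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=[1, 2, 3])
#     # backbone.channels gives the channel count of each returned feature map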
| 20
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__( self , parent , batch_size=2 , image_size=32 , patch_size=16 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=4 , backbone_out_indices=[0, 1, 2, 3] , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , initializer_range=0.02 , num_labels=3 , backbone_featmap_shape=[1, 384, 24, 24] , is_hybrid=True , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        """simple docstring"""
        backbone_config = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [96, 1_92, 3_84, 7_68],
'num_groups': 2,
}
        return DPTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=backbone_config , backbone_featmap_shape=self.backbone_featmap_shape , )
    def create_and_check_model( self , config , pixel_values , labels ):
        """simple docstring"""
        model = DPTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_depth_estimation( self , config , pixel_values , labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
__lowercase = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
__lowercase = (
{
"""depth-estimation""": DPTForDepthEstimation,
"""feature-extraction""": DPTModel,
"""image-segmentation""": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__lowercase = False
__lowercase = False
__lowercase = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = DPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DPTConfig , has_text_modality=False , hidden_size=37 )
def lowerCamelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='DPT does not use inputs_embeds' )
def lowerCamelCase ( self ):
"""simple docstring"""
pass
    def test_model_common_attributes( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_depth_estimation( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs )
    def test_for_semantic_segmentation( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
    def test_training( self ):
        """simple docstring"""
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_training_gradient_checkpointing( self ):
        """simple docstring"""
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config )
            model.to(torch_device )
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_initialization( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [F'{name}.{key}' for key in module.state_dict().keys()]
                    break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase ( self ):
"""simple docstring"""
pass
@slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_raise_readout_type( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = 'add'
        with self.assertRaises(ValueError ):
            model = DPTForDepthEstimation(config )
def prepare_img() -> "Image.Image":
_snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
@slow
class __UpperCAmelCase ( unittest.TestCase ):
    def test_inference_depth_estimation( self ):
        """simple docstring"""
        image_processor = DPTImageProcessor.from_pretrained('Intel/dpt-hybrid-midas' )
        model = DPTForDepthEstimation.from_pretrained('Intel/dpt-hybrid-midas' ).to(torch_device )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        predicted_depth = outputs.predicted_depth
        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384) )
        self.assertEqual(predicted_depth.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , expected_slice , atol=1e-4 ) )
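# Usage sketch (editor addition): the same checkpoint through the high-level
# pipeline API; "Intel/dpt-hybrid-midas" is the checkpoint the test above uses.
#
#     from transformers import pipeline
#     depth_estimator = pipeline("depth-estimation", model="Intel/dpt-hybrid-midas")
#     out = depth_estimator("tests/fixtures/tests_samples/COCO/000000039769.png")
#     out["predicted_depth"]  # torch.Tensor depth map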
| 718
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mask2former"] = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 542
| 0
|
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_lock_timeout(tmpdir):
    locka = FileLock(str(tmpdir / 'foo.lock'))
    lockb = FileLock(str(tmpdir / 'foo.lock'))
    timeout = 0.01
    with locka.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lockb.acquire(timeout)
        assert time.time() - _start > timeout
def test_long_path(tmpdir):
    filename = 'a' * 1000 + '.lock'
    locka = FileLock(str(tmpdir / filename))
    assert locka._lock_file.endswith('.lock')
    assert not locka._lock_file.endswith(filename)
    assert len(os.path.basename(locka._lock_file)) <= 255
    lockb = FileLock(tmpdir / filename)
    with locka.acquire():
        with pytest.raises(Timeout):
            lockb.acquire(0)
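# Minimal usage sketch (editor addition, not part of the test suite): FileLock
# is a context manager guarding a file across processes; the path is hypothetical.
#
#     lock = FileLock("/tmp/my_dataset.lock")
#     with lock:
#         pass  # critical section: only one process at a time gets here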
| 23
|
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()
# For specifying empty leaf dict `{}`
empty_dict = object()
def _match( qs , ks ) -> bool:
    # Compile regexes and force a complete match over each window of ks.
    qts = tuple(re.compile(x + "$" ) for x in qs )
    for i in range(len(ks ) - len(qts ) + 1 ):
        matches = [x.match(y ) for x, y in zip(qts , ks[i:] )]
        if matches and all(matches ):
            return True
    return False
def _replacement_rules( rules ):
    def replace( key , val ):
        for rule, replacement in rules:
            if _match(rule , key ):
                return replacement
        return val
    return replace
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp" , None )),
        (("transformer", "wte", "embedding"), P("mp" , None )),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None , "mp" )),
        (("attention", "out_proj", "kernel"), P("mp" , None )),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None , "mp" )),
        (("mlp", "c_fc", "bias"), P("mp" )),
        (("mlp", "c_proj", "kernel"), P("mp" , None )),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions( in_dict ):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules )
    initd = {k: _unmatched for k in flatten_dict(in_dict )}
    result = {k: replace(k , v ) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result ) )
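# Usage sketch (editor addition): given a Flax param pytree, `set_partitions`
# returns a matching pytree of PartitionSpecs. The parameter tree below is a
# hypothetical stand-in for real GPT-style weights.
#
#     params = {"transformer": {"wte": {"embedding": ...}}}
#     specs = set_partitions(params)
#     # specs["transformer"]["wte"]["embedding"] == P("mp", None)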
| 346
| 0
|
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("""[^A-Za-z_0-9]""")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash( tokens ) -> Optional[MinHash]:
    if len(tokens ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(tokens ):
        min_hash.update(token.encode() )
    return min_hash
def get_tokens( code ) -> Set[str]:
    return {t for t in NON_ALPHA.split(code ) if len(t.strip() ) > 0}
class UpperCAmelCase__ :
    def __init__( self , *,
        duplication_jaccard_threshold = 0.85 , ):
        """simple docstring"""
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        self._duplicate_clusters = defaultdict(set )
    def add( self , code_key , min_hash ):
        """simple docstring"""
        close_duplicates = self._index.query(min_hash )
        if code_key in self._index.keys:
            print(F"Duplicate key {code_key}" )
            return
        self._index.insert(code_key , min_hash )
        if len(close_duplicates ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key )
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key )
    def get_duplicate_clusters( self ):
        """simple docstring"""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates )
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster )
        return duplicate_clusters
    def save( self , filepath ):
        """simple docstring"""
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath , "w" ) as f:
            json.dump(duplicate_clusters , f )
def _compute_min_hash( element ):
    index , data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter( dataset_iterator ):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash ,ThreadedIterator(dataset_iterator ,max_queue_size=10000 ) ,chunksize=100 ,):
            if data is not None:
                yield data
def make_duplicate_clusters( dataset_iterator ,jaccard_threshold ):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator ) ) ,max_queue_size=100 ) ):
        di.add(filename ,min_hash )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity( codea ,codeb ) -> float:
    tokensa = get_tokens(codea )
    tokensb = get_tokens(codeb )
    return len(tokensa & tokensb ) / len(tokensa | tokensb )
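# Quick illustration (editor addition): token-level Jaccard on two snippets.
#     jaccard_similarity("def f(x): return x", "def g(x): return x")
# shares {def, x, return} out of {def, f, g, x, return}: 3 / 5 = 0.6.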
_lowerCAmelCase = None
def _find_cluster_extremes_shared( cluster ,jaccard_threshold ):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1 ,code2 ) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1 )
    return extremes
def find_extremes( cluster_list ,dataset ,jaccard_threshold ):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared ,jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f ,cluster_list ,) ,total=len(cluster_list ) ,):
            extremes_list.append(extremes )
    return extremes_list
def deduplicate_dataset( dataset ,jaccard_threshold = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset ,jaccard_threshold )
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters ,dataset ,jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda x ,idx : idx not in remove_indices ,with_indices=True )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(f"Original dataset size: {len(dataset )}" )
    print(f"Number of duplicate clusters: {len(duplicate_clusters )}" )
    print(f"Files in duplicate cluster: {len(duplicate_indices )}" )
    print(f"Unique files in duplicate cluster: {len(extreme_dict )}" )
    print(f"Filtered dataset size: {len(ds_filter )}" )
    return ds_filter, duplicate_clusters
| 306
|
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
_lowerCAmelCase = """sshleifer/mar_enro_6_3_student"""
class UpperCAmelCase__ ( TestCasePlus ):
def snake_case_ ( self ):
"""simple docstring"""
super().setUp()
        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz" , extract_compressed_file=True , )
        self.data_dir = F"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
def snake_case_ ( self ):
"""simple docstring"""
        MarianMTModel.from_pretrained(MARIAN_MODEL )
@slow
@require_torch_gpu
def snake_case_ ( self ):
"""simple docstring"""
        env_vars_to_replace = {
"$MAX_LEN": 64,
"$BS": 64,
"$GAS": 1,
"$ENRO_DIR": self.data_dir,
"facebook/mbart-large-cc25": MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
"--learning_rate=3e-5": "--learning_rate 3e-4",
"--num_train_epochs 6": "--num_train_epochs 1",
}
# Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py" )[1].strip()
        bash_script = bash_script.replace("\\\n" , "" ).strip().replace("\"$@\"" , "" )
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k , str(v ) )
        output_dir = self.get_auto_remove_tmp_dir()
        # bash_script = bash_script.replace("--fp16 ", "")
        args = F"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys , "argv" , testargs ):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser )
            parser = SummarizationModule.add_model_specific_args(parser , os.getcwd() )
            args = parser.parse_args()
            model = main(args )
# Check metrics
        metrics = load_json(model.metrics_save_path )
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
self.assertEqual(len(metrics["val"] ) , (args.max_epochs / args.val_check_interval) )
        assert isinstance(last_step_stats[F"val_avg_{model.val_metric}"] , float )
self.assertGreater(last_step_stats["val_avg_gen_time"] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats["val_avg_gen_time"] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats["val_avg_bleu"] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir )
        ckpt_name = [x for x in contents if x.endswith(".ckpt" )][0]
        ckpt_path = os.path.join(args.output_dir , ckpt_name )
        ckpt = torch.load(ckpt_path , map_location="cpu" )
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            contents = {os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["test"] ) == 1
class UpperCAmelCase__ ( TestCasePlus ):
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def snake_case_ ( self ):
"""simple docstring"""
        data_dir = F"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
"--fp16_opt_level=O1": "",
"$MAX_LEN": 128,
"$BS": 16,
"$GAS": 1,
"$ENRO_DIR": data_dir,
"$m": "sshleifer/student_marian_en_ro_6_1",
"val_check_interval=0.25": "val_check_interval=1.0",
}
# Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py" )[1].strip()
        )
        bash_script = bash_script.replace("\\\n" , "" ).strip().replace("\"$@\"" , "" )
        bash_script = bash_script.replace("--fp16 " , " " )
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k , str(v ) )
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16" , "" )
        epochs = 6
        testargs = (
["distillation.py"]
+ bash_script.split()
+ [
F"--output_dir={output_dir}",
"--gpus=1",
"--learning_rate=1e-3",
F"--num_train_epochs={epochs}",
"--warmup_steps=10",
"--val_check_interval=1.0",
"--do_predict",
]
)
        with patch.object(sys , "argv" , testargs ):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser )
            parser = SummarizationDistiller.add_model_specific_args(parser , os.getcwd() )
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            model = distill_main(args )
# Check metrics
        metrics = load_json(model.metrics_save_path )
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
assert len(metrics["val"] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[F"val_avg_{model.val_metric}"] , float )
# check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir )
        ckpt_name = [x for x in contents if x.endswith(".ckpt" )][0]
        ckpt_path = os.path.join(args.output_dir , ckpt_name )
        ckpt = torch.load(ckpt_path , map_location="cpu" )
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            contents = {os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["test"] ) == 1
| 306
| 1
|
"""simple docstring"""
class CircularQueue:
    """Circular FIFO queue with fixed capacity, backed by a Python list."""

    def __init__(self, n: int) -> None:
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
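# A minimal usage sketch for the ring buffer above. Method names follow the
# restored class; the capacity of 3 is arbitrary.
if __name__ == "__main__":
    queue = CircularQueue(3)
    queue.enqueue("a").enqueue("b").enqueue("c")  # enqueue returns self, so calls chain
    print(queue.dequeue())  # "a" -- front advances modulo the capacity
    queue.enqueue("d")      # reuses the slot freed by the dequeue
    print(len(queue))       # 3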
| 470
|
"""simple docstring"""
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
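# A hedged usage sketch: outside the tests, the same helper shrinks a teacher
# checkpoint by copying alternating layers. The assumption here is that the
# extra return values are the copied layer ids; names are illustrative.
if __name__ == "__main__":
    student, e_layer_ids, d_layer_ids = create_student_by_copying_alternating_layers(
        TINY_BART, tempfile.mkdtemp(), e=1, d=1
    )
    print(student.config.encoder_layers, e_layer_ids, d_layer_ids)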
| 470
| 1
|
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)

        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
| 387
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
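# A hedged usage sketch (not part of the original file): assemble the pipeline
# from pretrained Whisper and Stable Diffusion components, then drive it with a
# waveform. The checkpoint ids are illustrative assumptions, and the zero
# waveform merely stands in for a real spoken prompt at 16 kHz.
if __name__ == "__main__":
    import numpy as np

    speech_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
    speech_processor = WhisperProcessor.from_pretrained("openai/whisper-small")
    sd = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
    pipe = SpeechToImagePipeline(
        speech_model=speech_model,
        speech_processor=speech_processor,
        vae=sd.vae,
        text_encoder=sd.text_encoder,
        tokenizer=sd.tokenizer,
        unet=sd.unet,
        scheduler=sd.scheduler,
        safety_checker=None,
        feature_extractor=sd.feature_extractor,
    )
    audio = np.zeros(16_000, dtype=np.float32)
    image = pipe(audio, sampling_rate=16_000, num_inference_steps=25).images[0]
    image.save("speech_to_image.png")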
| 387
| 1
|
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level encoding,
        # so this check is skipped for CodeGen.
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        text = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b: result = a\nelse: result = b"

        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)

        self.assertEqual(decoded_text, expected_truncated_text)

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
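# A hedged sketch of the truncation feature exercised above: decode() can cut
# the output at the first regex listed in `truncate_before_pattern`, which is
# useful for trimming code completions at comment markers or blank-line runs.
# (Downloads the same checkpoint the slow test uses; the snippet is made up.)
if __name__ == "__main__":
    tok = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
    ids = tok.encode("def add(a, b):\n    return a + b\n\n\n\n# next function")
    print(tok.decode(ids, truncate_before_pattern=["^#", "\n\n\n"]))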
| 412
|
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2


class Dictionary:
    """A mapping from symbols to consecutive integers."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Load the dictionary from a text file with the format `<symbol0> <count0>` per line."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Add a word to the dictionary, accumulating counts for repeats."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        """Load a pre-existing dictionary from a text file and add its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")


def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
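# A quick demonstration of the key rewriting above (toy counts, hand-checked):
# BPE continuation markers are stripped, word-final tokens gain "</w>", and the
# four special tokens keep their bare form (re-inserted at the end of the dict).
#   >>> rewrite_dict_keys({"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 5, "er": 7})
#   {'le': 5, 'er</w>': 7, '<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}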
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict[layer_name.replace("decoder.", "")] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
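# A hedged follow-up: once the conversion has run, the dump folder loads with
# the regular transformers API (the path below is whatever was passed as
# --pytorch_dump_folder_path; shown as comments so the CLI behavior is unchanged).
# from transformers import BioGptForCausalLM, BioGptTokenizer
# model = BioGptForCausalLM.from_pretrained("path/to/pytorch_dump_folder")
# tokenizer = BioGptTokenizer.from_pretrained("path/to/pytorch_dump_folder")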
| 412
| 1
|
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = '''
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
            '''.split()
        self.run_and_check(train_args)

        eval_args = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
            '''.split()
        self.run_and_check(eval_args)

        entropy_eval_args = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
            '''.split()
        self.run_and_check(entropy_eval_args)
| 169
|
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """Return the longest non-decreasing subsequence found in ``array``."""
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
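# A small worked example, traced by hand through the recursion above: the call
# explores subsequences anchored at successive pivots and keeps the longest.
#   >>> longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80])
#   [10, 22, 33, 41, 60, 80]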
| 169
| 1
|
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """Fourth-order Improved Pseudo Linear Multistep scheduler."""

    order = 1

    @register_to_config
    def __init__(
        self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None
    ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # linear multistep (Adams-Bashforth) coefficients, falling back to lower
        # orders until enough history has accumulated
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
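# A hedged usage sketch of the scheduler's denoising loop; the lambda stands in
# for any noise-prediction network with a (sample, t) -> residual signature, and
# the tensor shape is arbitrary.
if __name__ == "__main__":
    scheduler = IPNDMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(50)
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    model = lambda x, t: torch.zeros_like(x)  # placeholder noise predictor
    for t in scheduler.timesteps:
        residual = model(sample, t)
        sample = scheduler.step(residual, t, sample).prev_sample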
| 570
|
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}
def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
        return [l.strip() for l in lines]
class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer from a plain-text vocabulary file."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )

            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True)
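# A hedged usage sketch: every residue letter is a no-split token, so a raw
# protein string tokenizes residue by residue. The checkpoint id is one of the
# vocab URLs referenced above; the sequence is an arbitrary example.
if __name__ == "__main__":
    tokenizer = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
    encoded = tokenizer("MKTAYIAKQR")
    print(encoded["input_ids"])  # <cls> ... <eos> ids wrapped around the residues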
| 570
| 1
|
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """Fractional knapsack: greedy by value/weight ratio, taking the last fitting item fractionally."""
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
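# A worked example, hand-checked against the greedy above: values [60, 100, 120]
# with weights [10, 20, 30] and capacity 50 take the first two items whole
# (value 160) plus 20/30 of the third (value 80), for a total of 240.0.
if __name__ == "__main__":
    print(frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3))  # 240.0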
| 569
|
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Interleave two strings character by character; the tail of the longer string is appended."""
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
print(alternative_string_arrange('''AB''', '''XYZ'''), end=''' ''')
| 569
| 1
|