# Fast tokenizer class for PEGASUS; relative imports assume the transformers source tree.
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}


class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq: List[int]) -> List[int]:
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
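
# Usage sketch (assumes `transformers` with sentencepiece/tokenizers installed
# and Hub access): encoding ends with </s>, matching build_inputs_with_special_tokens.
from transformers import PegasusTokenizerFast

tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
ids = tok("PEGASUS uses gap-sentence generation.")["input_ids"]
print(ids[-1] == tok.eos_token_id)  # True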
# Configuration class for CvT (Convolutional vision Transformer).
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
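
# Quick usage sketch (assumes a transformers install that ships Cvt): each
# per-stage argument is a 3-element list, one entry per Cvt stage.
from transformers import CvtConfig

config = CvtConfig()
print(config.embed_dim)  # [64, 192, 384]
print(config.depth)      # [1, 2, 10]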
# Closest pair of points via divide and conquer; the *_sqr helpers work on
# squared distances to avoid repeated square roots.
def euclidean_distance_sqr(point1, point2):
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda point: point[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    # brute force O(n^2) over all pairs
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    # in the strip, each point only needs to be compared with its ~6 neighbors
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case: fall back to brute force
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    # collect points within closest_pair_dis of the dividing vertical line
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x, points_sorted_on_y, points_counts
        )
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
# Shared pytest/unittest helpers for the `datasets` test suite.
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch

import pyarrow as pa
import pytest
import requests
from packaging import version

from datasets import config


if config.PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)

# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)


def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate


class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2


@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")


@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)


@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result


def pytest_xdist_worker_id():
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
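
# Sketch of how a test can combine these helpers (in the `datasets` repo this
# module is tests/utils.py, so the names above are assumed to be in scope):
@slow  # skipped unless RUN_SLOW=yes is exported
def test_fails_without_network():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.ConnectionError):
            requests.get("https://huggingface.co", timeout=2)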
# Fast tokenizer class for BlenderbotSmall; relative imports assume the transformers source tree.
from typing import List, Optional

from tokenizers import ByteLevelBPETokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}


class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
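
# Sketch (assumes `transformers`, `tokenizers`, and Hub access): the bos/eos
# wrapping here is applied by build_inputs_with_special_tokens explicitly.
from transformers import BlenderbotSmallTokenizerFast

tok = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
ids = tok.convert_tokens_to_ids(tok.tokenize("hello world"))
print(tok.build_inputs_with_special_tokens(ids))  # [bos_id, ..., eos_id]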
"""simple docstring"""
from __future__ import annotations
from collections import deque
class __A :
def __init__( self , a__ ):
_lowerCAmelCase : list[dict] = []
self.adlist.append(
{"""value""": """""", """next_states""": [], """fail_state""": 0, """output""": []} )
for keyword in keywords:
self.add_keyword(a__ )
self.set_fail_transitions()
def __A ( self , a__ , a__ ):
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def __A ( self , a__ ):
_lowerCAmelCase : Union[str, Any] = 0
for character in keyword:
_lowerCAmelCase : str = self.find_next_state(a__ , a__ )
if next_state is None:
self.adlist.append(
{
"""value""": character,
"""next_states""": [],
"""fail_state""": 0,
"""output""": [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
_lowerCAmelCase : List[str] = len(self.adlist ) - 1
else:
_lowerCAmelCase : Any = next_state
self.adlist[current_state]["output"].append(a__ )
def __A ( self ):
_lowerCAmelCase : deque = deque()
for node in self.adlist[0]["next_states"]:
q.append(a__ )
_lowerCAmelCase : str = 0
while q:
_lowerCAmelCase : Optional[Any] = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(a__ )
_lowerCAmelCase : Tuple = self.adlist[r]["""fail_state"""]
while (
self.find_next_state(a__ , self.adlist[child]["""value"""] ) is None
and state != 0
):
_lowerCAmelCase : List[Any] = self.adlist[state]["""fail_state"""]
_lowerCAmelCase : Optional[int] = self.find_next_state(
a__ , self.adlist[child]["""value"""] )
if self.adlist[child]["fail_state"] is None:
_lowerCAmelCase : int = 0
_lowerCAmelCase : str = (
self.adlist[child]["""output"""]
+ self.adlist[self.adlist[child]["""fail_state"""]]["""output"""]
)
def __A ( self , a__ ):
_lowerCAmelCase : dict = {} # returns a dict with keywords and list of its occurrences
_lowerCAmelCase : Any = 0
for i in range(len(a__ ) ):
while (
self.find_next_state(a__ , string[i] ) is None
and current_state != 0
):
_lowerCAmelCase : Any = self.adlist[current_state]["""fail_state"""]
_lowerCAmelCase : List[Any] = self.find_next_state(a__ , string[i] )
if next_state is None:
_lowerCAmelCase : Optional[Any] = 0
else:
_lowerCAmelCase : Optional[int] = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
_lowerCAmelCase : List[Any] = []
result[key].append(i - len(a__ ) + 1 )
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
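
# Usage sketch for Automaton.search_in (offsets worked out by hand):
matches = Automaton(["what", "hat", "ver", "er"]).search_in("whatever, err ... , wherever")
print(matches)  # {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}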
# flake8: noqa
# Lint as: python3
# Formatter registry used by `Dataset.set_format`; relative imports assume the datasets source tree.
from typing import Dict, List, Optional, Type

from .. import config
from ..utils import logging
from .formatting import (
    ArrowFormatter,
    CustomFormatter,
    Formatter,
    PandasFormatter,
    PythonFormatter,
    TensorFormatter,
    format_table,
    query_table,
)
from .np_formatter import NumpyFormatter


logger = logging.get_logger(__name__)

_FORMAT_TYPES = {}
_FORMAT_TYPES_ALIASES = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error


# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")

if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])


def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
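
# Sketch: alias resolution in action (assumes `datasets` and `numpy` are
# installed; names as defined above).
print(type(get_formatter("np")).__name__)   # NumpyFormatter
print(type(get_formatter(None)).__name__)   # PythonFormatter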
# Numerical integration by the trapezoidal rule.
from __future__ import annotations

from collections.abc import Callable


def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0

    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2

        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
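
# Convergence sanity check (exact value worked out by hand): the integral of
# |x^3 + x^2| over [-5, 5] is 938/3 ≈ 312.6667, so the estimates above should
# approach it.
assert abs(trapezoidal_area(lambda x: x**3 + x**2, -5, 5, 100_000) - 938 / 3) < 0.01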
# Configuration class for ViViT (Video Vision Transformer).
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
# Configuration class for RWKV.
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
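
# The attribute_map above aliases `max_position_embeddings` to `context_length`
# (sketch, assumes a transformers install that ships RWKV):
from transformers import RwkvConfig

config = RwkvConfig(context_length=2048)
print(config.max_position_embeddings)  # 2048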
# SentencePiece-based tokenizer class for BigBird; relative imports assume the transformers source tree.
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}


class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            mask_token=mask_token,
            cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
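
# Usage sketch (assumes `transformers`, `sentencepiece`, and Hub access):
from transformers import BigBirdTokenizer

tok = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
tokens = tok.convert_ids_to_tokens(tok("Hello world")["input_ids"])
print(tokens[0], tokens[-1])  # [CLS] [SEP], per build_inputs_with_special_tokens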
# Lazy-import module init for BioGPT; relative imports assume the transformers source tree.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# Script that exports a BART model plus beam search to an ONNX graph and
# validates the exported graph against PyTorch outputs.
import argparse
import logging
import os
import sys

import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers

import transformers
from transformers import BartForConditionalGeneration, BartTokenizer


logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=os.environ.get("LOGLEVEL", "INFO").upper(),
    stream=sys.stdout,
)

logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}


def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()

    return args


def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer


def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")


def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)


if __name__ == "__main__":
    main()
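
# Typical invocation (file name assumed; in `transformers` this kind of script
# lives under examples/onnx/summarization):
#   python run_onnx_exporter.py --model_name_or_path facebook/bart-base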
# Tests for the Donut image processor; relative imports assume the transformers test tree.
import unittest

import numpy as np

from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DonutImageProcessor


class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
def text_justification(word: str, max_width: int) -> list:
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in the line,
            # just insert overall_spaces_count for the remainder of the line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for inner_word in words:
        if width + len(inner_word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(inner_word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(inner_word)
            width += len(inner_word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [inner_word], len(inner_word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
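# A usage sketch (hypothetical input sentence; the output below was checked by
# tracing the greedy line-filling and round-robin space distribution above):
# >>> text_justification("This is an example of text justification.", 16)
# ['This    is    an', 'example  of text', 'justification.  ']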
if __name__ == "__main__":
from doctest import testmod
testmod()
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 1000,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
IMAGENET_64_UNET_CONFIG = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 1000,
'''block_out_channels''': [192, 192 * 2, 192 * 3, 192 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
LSUN_256_UNET_CONFIG = {
'''sample_size''': 256,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
CD_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 151,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint
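# For illustration (hypothetical indices, not taken from a real checkpoint): with
# old_prefix="input_blocks.1.0" and new_prefix="down_blocks.0.resnets.0", the key
# "input_blocks.1.0.in_layers.0.weight" is remapped to
# "down_blocks.0.resnets.0.norm1.weight" by the function above.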
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path: str, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1

        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint
if __name__ == "__main__":
A_ :int = argparse.ArgumentParser()
parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
)
parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
A_ :List[Any] = parser.parse_args()
A_ :Tuple = strabool(args.class_cond)
A_ :List[str] = os.path.basename(args.unet_path)
print(f"Checkpoint: {ckpt_name}")
# Get U-Net config
if "imagenet64" in ckpt_name:
A_ :Optional[int] = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
A_ :List[Any] = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
A_ :Optional[int] = TEST_UNET_CONFIG
else:
raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
if not args.class_cond:
A_ :List[str] = None
A_ :List[str] = con_pt_to_diffuser(args.unet_path, unet_config)
A_ :str = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
A_ :List[str] = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
A_ :Optional[int] = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
A_ :Dict = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
A_ :Optional[Any] = CMStochasticIterativeScheduler(**scheduler_config)
A_ :int = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
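# A hypothetical invocation (the script filename and checkpoint path are
# placeholders, not from the original source; the checkpoint name only needs to
# contain the substrings the branching above keys on, e.g. "cd" + "imagenet64"):
#   python convert_consistency_to_diffusers.py \
#       --unet_path /path/to/cd_imagenet64_l2.pt \
#       --dump_path ./cd_imagenet64_l2 --class_cond True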
"""simple docstring"""
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def test_get_dataset_infos(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def test_get_dataset_info(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
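# These tests hit the network (they resolve Hub datasets like "squad" and "paws"),
# which is why the whole module is marked with `pytestmark = pytest.mark.integration`.
# Assuming the usual pytest marker setup, a run restricted to them would look like
# `pytest -m integration <path-to-this-file>`; the exact file path depends on the
# repository layout and is not given here.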
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
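# Note on the boundary assertions above: swish/SiLU is x * sigmoid(x), mish is
# x * tanh(softplus(x)), and GELU is x * Phi(x). All of them effectively vanish
# for large negative inputs (the gating factor underflows to zero in float32)
# and approach the identity for large positive inputs, which is what the
# -100/-200 and 20 checks pin down; the x = -1 check confirms the output is
# small but nonzero there.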
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") )
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") )
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") )
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") )
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
F'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
F'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
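# The 256/512 slice boundaries above reflect DETR's hidden size of 256: each fused
# in_proj matrix stacks the query, key and value projections (3 * 256 rows), so
# rows [:256], [256:512] and [-256:] recover q, k and v respectively.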
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val

    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)

    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''detr-resnet-50''',
type=str,
choices=['''detr-resnet-50''', '''detr-resnet-101'''],
help='''Name of the DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the model to the hub or not.''')
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
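# A minimal usage sketch (the field names were reconstructed above, so treat them
# as assumptions): tweak one option, then take an independent deep copy.
#   config = DownloadConfig(max_retries=3)
#   config_copy = config.copy()
#   assert config_copy.max_retries == 3 and config_copy is not config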
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
A_ = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
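# In normal use this module is reached through the CLI rather than executed
# directly: `accelerate config` starts the interactive prompts above, and
# `accelerate config --config_file ./my_config.yaml` chooses where the resulting
# YAML (or JSON, if the path ends in .json) is written.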
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
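# A minimal shape-check sketch (hypothetical sizes, not from the original source).
# Flax convolutions here take NHWC inputs, and `deterministic=True` by default
# means no dropout RNG is needed at init time:
#   block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
#   params = block.init(jax.random.PRNGKey(0), jnp.ones((1, 8, 8, 32)), jnp.ones((1, 128)))
#   out = block.apply(params, jnp.ones((1, 8, 8, 32)), jnp.ones((1, 128)))  # -> (1, 8, 8, 64)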
'''simple docstring'''
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # the Flax block-sparse attention does not return attention probabilities, so skip comparing them
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV into separate Q, K, V tensors
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequenceParallelTransformerBlock` stores the fused weight in K, V, Q order despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
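# A hypothetical invocation (the script filename and paths are placeholders, not
# from the original source; the flags are the ones defined just above):
#   python convert_opt_checkpoint.py \
#       --fairseq_path /path/to/opt/model.pt --pytorch_dump_folder_path ./opt-converted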
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")

        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")

        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # 0-1 BFS: zero-weight edges go to the front of the deque,
                # one-weight edges to the back
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
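# A minimal usage sketch (hypothetical graph, not from the original source):
#   g = AdjacencyList(3)
#   g.add_edge(0, 1, 0)          # zero-weight edge 0 -> 1
#   g.add_edge(1, 2, 1)          # one-weight edge 1 -> 2
#   g.get_shortest_path(0, 2)    # -> 1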
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)
def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    """
    Calculate the built-in voltage of a pn-junction diode.
    """
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            "Donor concentration should be greater than intrinsic concentration"
        )
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            "Acceptor concentration should be greater than intrinsic concentration"
        )
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
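# Worked example (hypothetical concentrations, all in cm^-3): for a silicon-like
# junction with donor_conc = acceptor_conc = 1e17 and intrinsic_conc = 1e10,
# builtin_voltage(1e17, 1e17, 1e10) = (kT/q) * ln(1e34 / 1e20)
#                                   ≈ 0.02585 V * 32.24 ≈ 0.83 V at T = 300 K.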
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
_snake_case = logging.get_logger(__name__)
class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1_000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1_000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def lowerCAmelCase_ ( self : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.get_unet_model(model_id='stabilityai/stable-diffusion-2' , fpaa=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.get_latents(_lowerCAmelCase , shape=(4, 4, 96, 96) , fpaa=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.get_encoder_hidden_states(_lowerCAmelCase , shape=(4, 77, 1_024) , fpaa=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model.apply(
{'params': params} , _lowerCAmelCase , jnp.array(_lowerCAmelCase , dtype=jnp.intaa ) , encoder_hidden_states=_lowerCAmelCase , ).sample
assert sample.shape == latents.shape
SCREAMING_SNAKE_CASE_ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE_ = jnp.array(_lowerCAmelCase , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-2 )
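
# Illustrative usage sketch: a single fp32 forward pass through the same Flax
# UNet the tests above exercise, with random inputs in place of the
# precomputed .npy fixtures. Shapes mirror the Stable Diffusion v1-4 defaults;
# the random inputs are an assumption for illustration only:
#
#     import jax
#     import jax.numpy as jnp
#     from diffusers import FlaxUNet2DConditionModel
#
#     model, params = FlaxUNet2DConditionModel.from_pretrained(
#         "CompVis/stable-diffusion-v1-4", subfolder="unet"
#     )
#     rng = jax.random.PRNGKey(0)
#     latents = jax.random.normal(rng, (4, 4, 64, 64), dtype=jnp.float32)
#     encoder_hidden_states = jax.random.normal(rng, (4, 77, 768), dtype=jnp.float32)
#     sample = model.apply(
#         {"params": params},
#         latents,
#         jnp.array(4, dtype=jnp.int32),
#         encoder_hidden_states=encoder_hidden_states,
#     ).sample
#     assert sample.shape == latents.shape
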
from __future__ import annotations


def simple_interest(
    principal: float, daily_interest_rate: float, days_between_payments: float
) -> float:
    """Interest accrued linearly: principal * rate * time."""
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments


def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    """Interest accrued with compounding: principal * ((1 + rate) ** periods - 1)."""
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    """Interest for an annual percentage rate, compounded daily over the given years."""
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
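
# Worked example with illustrative values: a $10,000 principal at a 0.05%
# daily rate over 30 days, the same principal compounded monthly for a year
# at a 5% nominal annual rate, and the APR helper over one year.
if __name__ == "__main__":
    print(simple_interest(10_000, 0.0005, 30))       # 150.0
    print(compound_interest(10_000, 0.05 / 12, 12))  # ~511.62
    print(apr_interest(10_000, 0.05, 1))             # ~512.67
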
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # invert the attention mask into the additive form the T5 blocks expect
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
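
# Illustrative usage sketch (small, made-up hyperparameters; mirrors the
# signature above but is not part of the original module):
#
#     import torch
#
#     encoder = SpectrogramNotesEncoder(
#         max_length=16, vocab_size=100, d_model=32, dropout_rate=0.1,
#         num_layers=2, num_heads=4, d_kv=8, d_ff=64,
#         feed_forward_proj="gated-gelu",
#     )
#     tokens = torch.randint(0, 100, (2, 16))
#     mask = torch.ones(2, 16, dtype=torch.long)
#     hidden, mask = encoder(encoder_input_tokens=tokens, encoder_inputs_mask=mask)
#     # hidden has shape (2, 16, 32)
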
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_mobilenet_v2''': [
'''MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileNetV2Config''',
'''MobileNetV2OnnxConfig''',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
'''MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileNetV2ForImageClassification''',
'''MobileNetV2ForSemanticSegmentation''',
'''MobileNetV2Model''',
'''MobileNetV2PreTrainedModel''',
'''load_tf_weights_in_mobilenet_v2''',
]
if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
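
# With the lazy-module pattern above, importing the package stays cheap: the
# classes listed in `_import_structure` are only imported when first accessed,
# e.g. (illustrative):
#
#     from transformers import MobileNetV2Config                  # light import
#     from transformers import MobileNetV2ForImageClassification  # pulls in torch
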
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
"xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
"xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
"xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
"xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
"xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
"xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
"xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
"xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
"xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}
class XLMConfig(PretrainedConfig):
    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }

    def __init__(
        self,
        vocab_size=30145,
        emb_dim=2048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2048**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
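
# Illustrative usage sketch (the hyperparameter overrides are arbitrary):
#
#     from transformers import XLMConfig, XLMModel
#
#     config = XLMConfig(emb_dim=1024, n_layers=6)
#     model = XLMModel(config)  # randomly initialized
#     assert config.hidden_size == 1024  # "hidden_size" is mapped to "emb_dim"
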
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
) -> Dict:
    """Run evaluation on part of the data for one gpu and save to {save_dir}/rank_{rank}_output.json"""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name",
        type=str,
        help="like facebook/bart-large-cnn,t5-base, etc.",
        default="sshleifer/distilbart-xsum-12-3",
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout",
        type=int,
        default=600,
        required=False,
        help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            pseudolabel_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {pseudolabel_path}, intermediate in {json_save_dir}/")
            save_json(preds, pseudolabel_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
    else:
        shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results) -> List:
    """Concatenate partial results into one list, then sort it by id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    # wait until every rank has written its rank_*.json file, or time out
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
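
# Illustrative multi-GPU launch (paths, GPU count, and the model name are
# placeholders, not prescribed by this script):
#
#     python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
#         --model_name sshleifer/distilbart-xsum-12-3 \
#         --data_dir xsum --save_dir dbart_xsum_generations \
#         --bs 16 --fp16
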
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
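
# Illustrative post-processing sketch: turning the logits checked above into a
# per-pixel class map (assumes `outputs` from one of the integration tests):
#
#     pred_seg = outputs.logits.argmax(dim=1)[0]  # (512, 512) tensor of label ids
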
def sum_digits(num: int) -> int:
    """Return the sum of the decimal digits of ``num``."""
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    """
    Compute the numerator of the ``max_n``-th convergent of the continued
    fraction for e and return the sum of its digits (Project Euler 65).
    """
    pre_numerator = 1
    cur_numerator = 2

    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp

    return sum_digits(cur_numerator)


if __name__ == "__main__":
    print(f"{solution() = }")
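
# Quick sanity checks with small, known values: the 10th convergent of e has
# numerator 1457, whose digits sum to 17.
if __name__ == "__main__":
    assert sum_digits(1457) == 17
    assert solution(10) == 17
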
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/config.json""",
"""funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json""",
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/config.json""",
"""funnel-transformer/medium-base""": """https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json""",
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/config.json""",
"""funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json""",
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json""",
"""funnel-transformer/xlarge-base""": """https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json""",
}
class FunnelConfig(PretrainedConfig):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=30522,
        block_sizes=[4, 4, 4],
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
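
# Illustrative usage sketch (the block sizes below are arbitrary):
#
#     from transformers import FunnelConfig
#
#     config = FunnelConfig(block_sizes=[2, 2, 2], n_head=8)
#     assert config.num_hidden_layers == 6  # sum(block_sizes)
#     assert config.num_blocks == 3         # len(block_sizes)
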
import argparse
import hashlib
import io
import os
import urllib.request
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
"""tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""",
"""tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""",
"""base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""",
"""base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""",
"""small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""",
"""small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""",
"""medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""",
"""medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""",
"""large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""",
"""large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""",
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)


WHISPER_MAPPING = {
"""blocks""": """layers""",
"""mlp.0""": """fc1""",
"""mlp.2""": """fc2""",
"""mlp_ln""": """final_layer_norm""",
""".attn.query""": """.self_attn.q_proj""",
""".attn.key""": """.self_attn.k_proj""",
""".attn.value""": """.self_attn.v_proj""",
""".attn_ln""": """.self_attn_layer_norm""",
""".attn.out""": """.self_attn.out_proj""",
""".cross_attn.query""": """.encoder_attn.q_proj""",
""".cross_attn.key""": """.encoder_attn.k_proj""",
""".cross_attn.value""": """.encoder_attn.v_proj""",
""".cross_attn_ln""": """.encoder_attn_layer_norm""",
""".cross_attn.out""": """.encoder_attn.out_proj""",
"""decoder.ln.""": """decoder.layer_norm.""",
"""encoder.ln.""": """encoder.layer_norm.""",
"""token_embedding""": """embed_tokens""",
"""encoder.positional_embedding""": """encoder.embed_positions.weight""",
"""decoder.positional_embedding""": """decoder.embed_positions.weight""",
"""ln_post""": """layer_norm""",
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"{key} -> {new_key}")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str = ".") -> dict:
    # NOTE (editorial assumptions): the only call site below passes just the URL,
    # so `root` gets a default here, and the verified bytes are deserialized with
    # torch.load so the caller receives a checkpoint dict.
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return torch.load(io.BytesIO(model_bytes), map_location="cpu")
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )

    return torch.load(io.BytesIO(model_bytes), map_location="cpu")
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path])
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase : Union[str, Any] = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Patht to the downloaded checkpoints""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
lowercase : Optional[int] = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
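
# Illustrative invocation (the output folder is a placeholder; "small.en" is a
# key of _MODELS above, so the checkpoint is downloaded first):
#
#     python convert_openai_to_hf.py --checkpoint_path small.en \
#         --pytorch_dump_folder_path ./whisper-small.en-hf
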
import random


def _partition(data: list, pivot) -> tuple:
    """Three-way partition of ``data`` around ``pivot``: (less, equal, greater)."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the element that would sit at position ``index`` if ``items`` were sorted."""
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
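
# Usage example with illustrative data: quick_select(data, k) returns the
# (k+1)-th smallest element without fully sorting the list.
if __name__ == "__main__":
    data = [2, 4, 5, 7, 899, 54, 32]
    print(quick_select(data, 5))  # sorted(data)[5] == 54
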
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_xlm_roberta": [
"XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaConfig",
"XLMRobertaOnnxConfig",
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
"XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaForCausalLM",
"XLMRobertaForMaskedLM",
"XLMRobertaForMultipleChoice",
"XLMRobertaForQuestionAnswering",
"XLMRobertaForSequenceClassification",
"XLMRobertaForTokenClassification",
"XLMRobertaModel",
"XLMRobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
"TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMRobertaForCausalLM",
"TFXLMRobertaForMaskedLM",
"TFXLMRobertaForMultipleChoice",
"TFXLMRobertaForQuestionAnswering",
"TFXLMRobertaForSequenceClassification",
"TFXLMRobertaForTokenClassification",
"TFXLMRobertaModel",
"TFXLMRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
"FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxXLMRobertaForMaskedLM",
"FlaxXLMRobertaForCausalLM",
"FlaxXLMRobertaForMultipleChoice",
"FlaxXLMRobertaForQuestionAnswering",
"FlaxXLMRobertaForSequenceClassification",
"FlaxXLMRobertaForTokenClassification",
"FlaxXLMRobertaModel",
"FlaxXLMRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
a_ = 'src/transformers'
# Matches is_xxx_available()
a_ = re.compile(r'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
a_ = re.compile(r'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
a_ = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
a_ = re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
a_ = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
a_ = re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
a_ = re.compile('^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
a_ = re.compile('^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
a_ = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
a_ = re.compile(r'^\s*try:')
# Catches a line with else:
a_ = re.compile(r'^\s*else:')
def __UpperCAmelCase ( __UpperCamelCase ):
if _re_test_backend.search(__UpperCamelCase ) is None:
return None
__lowercase : List[str] = [b[0] for b in _re_backend.findall(__UpperCamelCase )]
backends.sort()
return "_and_".join(__UpperCamelCase )
def __UpperCAmelCase ( __UpperCamelCase ):
with open(__UpperCamelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__lowercase : Tuple = f.readlines()
__lowercase : str = 0
while line_index < len(__UpperCamelCase ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(__UpperCamelCase ):
return None
# First grab the objects without a specific backend in _import_structure
__lowercase : List[str] = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
__lowercase : List[Any] = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(__UpperCamelCase ):
__lowercase : int = _re_one_line_import_struct.search(__UpperCamelCase ).groups()[0]
__lowercase : Union[str, Any] = re.findall('''\[([^\]]+)\]''' , __UpperCamelCase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
__lowercase : Tuple = _re_import_struct_key_value.search(__UpperCamelCase )
if single_line_import_search is not None:
__lowercase : Tuple = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(__UpperCamelCase ) > 0]
objects.extend(__UpperCamelCase )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
__lowercase : Optional[Any] = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
__lowercase : Dict = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__lowercase : str = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__lowercase : Optional[Any] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
__lowercase : Union[str, Any] = lines[line_index]
if _re_import_struct_add_one.search(__UpperCamelCase ) is not None:
objects.append(_re_import_struct_add_one.search(__UpperCamelCase ).groups()[0] )
elif _re_import_struct_add_many.search(__UpperCamelCase ) is not None:
__lowercase : Optional[Any] = _re_import_struct_add_many.search(__UpperCamelCase ).groups()[0].split(''', ''' )
__lowercase : Tuple = [obj[1:-1] for obj in imports if len(__UpperCamelCase ) > 0]
objects.extend(__UpperCamelCase )
elif _re_between_brackets.search(__UpperCamelCase ) is not None:
__lowercase : int = _re_between_brackets.search(__UpperCamelCase ).groups()[0].split(''', ''' )
__lowercase : int = [obj[1:-1] for obj in imports if len(__UpperCamelCase ) > 0]
objects.extend(__UpperCamelCase )
elif _re_quote_object.search(__UpperCamelCase ) is not None:
objects.append(_re_quote_object.search(__UpperCamelCase ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 12 + '''"''' ):
objects.append(line[13:-3] )
line_index += 1
__lowercase : str = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
__lowercase : Union[str, Any] = []
while (
line_index < len(__UpperCamelCase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
__lowercase : List[str] = lines[line_index]
__lowercase : Optional[Any] = _re_import.search(__UpperCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
__lowercase : Tuple = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(__UpperCamelCase ):
# If the line is an if is_backend_available, we grab all objects associated.
__lowercase : List[Any] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__lowercase : Dict = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__lowercase : Optional[int] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
__lowercase : Optional[Any] = lines[line_index]
__lowercase : Optional[int] = _re_import.search(__UpperCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 12 ):
objects.append(line[12:-2] )
line_index += 1
__lowercase : List[Any] = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
def find_duplicates(__UpperCamelCase ):
return [k for k, v in collections.Counter(__UpperCamelCase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
__lowercase : List[str] = []
for key in import_dict_objects.keys():
__lowercase : Optional[int] = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(f"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
__lowercase : List[Any] = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(f"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
__lowercase : List[Any] = '''base imports''' if key == '''none''' else f"""{key} backend"""
errors.append(f"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
def __UpperCAmelCase ( ):
__lowercase : Tuple = []
for root, _, files in os.walk(__UpperCamelCase ):
if "__init__.py" in files:
__lowercase : Optional[int] = os.path.join(__UpperCamelCase , '''__init__.py''' )
__lowercase : Dict = parse_init(__UpperCamelCase )
if objects is not None:
__lowercase : Dict = analyze_results(*__UpperCamelCase )
if len(__UpperCamelCase ) > 0:
__lowercase : str = f"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append('''\n'''.join(__UpperCamelCase ) )
if len(__UpperCamelCase ) > 0:
raise ValueError('''\n\n'''.join(__UpperCamelCase ) )
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)
    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
'''simple docstring'''
        # If we have a list of dicts, convert it to a dict of lists so `pad` also works on
        # lists of `BatchFeature`.
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]

        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)
    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features
    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features
    def _get_padding_strategies(self, padding=False, max_length=None):
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
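# Illustrative usage (hypothetical subclass; the names below are assumptions, not part
# of this module): a concrete extractor only defines `model_input_names` plus the
# constructor arguments, after which `pad` batches ragged inputs:
#
#     class ToyFeatureExtractor(SequenceFeatureExtractor):
#         model_input_names = ["input_values"]
#
#     extractor = ToyFeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
#     batch = extractor.pad(
#         {"input_values": [[0.1, 0.2, 0.3], [0.4]]}, padding="longest", return_tensors="np"
#     )
#     # batch["input_values"] has shape (2, 3); batch["attention_mask"] flags real frames.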
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """Build a map of TensorFlow variable names to the corresponding PyTorch parameters."""
    tf_to_pt_map = {}

    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetVaForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
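# Illustrative mapping entry produced above (assuming a classification model): for
# i == 0, tf_index == 1 and pt_index == 0, so
#   "MobilenetV1/Conv2d_1_depthwise/depthwise_weights" -> backbone.layer[0].convolution.weight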
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints into a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
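# Worked example (illustrative): for a 224x224 input with a 3x3 kernel and stride 2,
# 224 % 2 == 0, so pad_along_height = max(3 - 2, 0) = 1, giving padding
# (left, right, top, bottom) = (0, 1, 0, 1) -- the same asymmetric padding that
# TensorFlow's "SAME" mode applies.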
class MobileNetVaConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetVaConfig,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[bool or str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None
    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaModel(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetVaConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()
    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)

        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
"""simple docstring"""
def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]:
    """Return the given decimal as a (numerator, denominator) pair in lowest terms."""
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # Reduce by the greatest common divisor, found with Euclid's algorithm.
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
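# Worked examples (illustrative): decimal_to_fraction(1.5) -> (3, 2), since 15/10
# reduces by gcd 5; decimal_to_fraction("6.25") -> (25, 4), since 625/100 reduces by
# gcd 25. The final call below, decimal_to_fraction("78td"), raises ValueError because
# the input cannot be parsed as a number.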
if __name__ == "__main__":
print(f'''{decimal_to_fraction(2) = }''')
print(f'''{decimal_to_fraction(8_9.0) = }''')
print(f'''{decimal_to_fraction('67') = }''')
print(f'''{decimal_to_fraction('45.0') = }''')
print(f'''{decimal_to_fraction(1.5) = }''')
print(f'''{decimal_to_fraction('6.25') = }''')
print(f'''{decimal_to_fraction('78td') = }''')
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
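# Illustrative effect of the lazy init above: an import such as
#   from transformers.models.timm_backbone import TimmBackbone
# resolves through _LazyModule, so the heavy torch-backed module is only imported on
# first attribute access rather than at package import time.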
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model
    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)
    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'''files''', [
['''full:README.md''', '''dataset_infos.json'''],
['''empty:README.md''', '''dataset_infos.json'''],
['''dataset_infos.json'''],
['''full:README.md'''],
], )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'''dataset_info''', [
DatasetInfo(),
DatasetInfo(
description='''foo''', features=Features({'''a''': Value('''int32''' )} ), builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train'''}], download_size=4_2, ),
], )
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_missing_info():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'''dataset_infos_dict''', [
DatasetInfosDict(),
DatasetInfosDict({'''default''': DatasetInfo()} ),
DatasetInfosDict({'''my_config_name''': DatasetInfo()} ),
DatasetInfosDict(
{
'''default''': DatasetInfo(
description='''foo''', features=Features({'''a''': Value('''int32''' )} ), builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train'''}], download_size=4_2, )
} ),
DatasetInfosDict(
{
'''v1''': DatasetInfo(dataset_size=4_2 ),
'''v2''': DatasetInfo(dataset_size=1_3_3_7 ),
} ),
], )
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor(
            [[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]]
        )

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
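# Illustrative input/output (toy data, not taken from a real vocab file): a JSON line
#   {"id": 3, "entities": [["Japan", "en"], ["日本", "ja"]]}
# yields the mapping entries {"en:Japan": 3, "ja:日本": 3}.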
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
def manhattan_distance(point_a: list, point_b: list) -> float:
    """Distance between two points in n-dimensional space under the L1 (taxicab) norm."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))
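# Worked example (illustrative): manhattan_distance([1, 1], [2, 2]) == 2.0,
# since |1 - 2| + |1 - 2| = 2.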
def _validate_point(point: list[float]) -> None:
    """Raise TypeError/ValueError when the input is not a non-empty list of numbers."""
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")
def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """Same distance as manhattan_distance, computed in a single expression."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
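# The function evaluates the Friedmann relation of standard cosmology:
#   H(z) = H0 * sqrt(Omega_r (1+z)^4 + Omega_m (1+z)^3 + Omega_k (1+z)^2 + Omega_Lambda)
# where Omega_k = 1 - (Omega_m + Omega_r + Omega_Lambda) is the curvature density,
# as in the LCDM demonstration below.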
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
lowercase : Union[str, Any] = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
'''simple docstring'''
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)
def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
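# Expected input format (illustrative): each line of the "correct" file holds the four
# ;-separated fields unpacked above, e.g.
#   tests/test_modeling_foo.py;FooModelTest;test_logits;expected_slice = torch.tensor([...])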
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
    args = parser.parse_args()
main(args.correct_filename, args.fail_filename)
'''simple docstring'''
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    # Compare two initializers while ignoring their names.
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b

    a.name = name_a
    b.name = name_b

    return res
def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    """
    Remove duplicated initializers from an ONNX model and save the slimmer model next to it.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:  # float32
                    mem_size *= 4
                elif dtype == 6:  # int32
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # int64 / float64
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
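# Illustrative usage: remove_dup_initializers("exported/model.onnx") writes
# "exported/optimized_model.onnx" and returns that path; the original file is left
# untouched.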
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = PILImageResampling.BICUBIC , _UpperCamelCase = None , **_UpperCamelCase , ):
_UpperCAmelCase = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
_UpperCAmelCase = get_resize_output_image_size(_UpperCamelCase , size=size['''shortest_edge'''] , default_to_square=_UpperCamelCase )
return resize(_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ):
_UpperCAmelCase = get_size_dict(_UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(_UpperCamelCase , size=(size['''height'''], size['''width''']) , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ):
return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ):
return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = ChannelDimension.FIRST , **_UpperCamelCase , ):
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(_UpperCamelCase , param_name='''size''' , default_to_square=_UpperCamelCase )
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(_UpperCamelCase , param_name='''crop_size''' , default_to_square=_UpperCamelCase )
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_UpperCAmelCase = make_list_of_images(_UpperCamelCase )
if not valid_images(_UpperCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_UpperCAmelCase = [convert_to_rgb(_UpperCamelCase ) for image in images]
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(_UpperCamelCase ) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase ) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase ) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase ) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase ) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase ) for image in images]
_UpperCAmelCase = {'''pixel_values''': images}
return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase )
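# Hedged usage sketch (not part of the original module): preprocessing a single
# image with the processor defined above. Requires Pillow; the output shape
# assumes the default 224x224 center crop.
if __name__ == "__main__":
    from PIL import Image

    example_processor = CLIPImageProcessor()
    example_image = Image.fromarray(np.zeros((256, 256, 3), dtype=np.uint8))
    example_batch = example_processor(images=example_image, return_tensors="np")
    print(example_batch["pixel_values"].shape)  # -> (1, 3, 224, 224)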
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING

FlaxAutoModel = auto_class_update(FlaxAutoModel)

class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING

FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")

class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING

FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")

class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING

FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")

class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)

class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)

class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING

FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")

class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING

FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)

class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING

FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")

class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING

FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)

class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING

FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)

class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING

FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")

class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING

FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
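# Hedged usage sketch (not part of the original module): the *_NAMES tables
# are plain OrderedDicts from model type to class name, so this lookup does
# not require Flax to be installed; FlaxAutoModel.from_pretrained(...) would
# additionally resolve and download a checkpoint.
if __name__ == "__main__":
    print(FLAX_MODEL_MAPPING_NAMES["bert"])  # -> "FlaxBertModel"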
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of an init file."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init file and parse, per backend, the objects defined in the `_import_structure`
    half and the objects defined in the `TYPE_CHECKING` half.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """
    Analyze the differences between the _import_structure objects and the TYPE_CHECKING
    objects found in an init.
    """

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """
    Check all inits in the repo and raise an error if at least one does not define the
    same objects in both halves.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Return the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    """Check that every submodule is registered in the main init of Transformers."""
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
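# Hedged illustration (not part of the original script): what `find_backend`
# returns for the two kinds of lines it handles.
#
#   find_backend("    if not is_torch_available():")  ->  "torch"
#   find_backend("import os")                         ->  None
#
# Multiple backends on one line are sorted and joined with "_and_".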
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
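# Hedged illustration (not part of the original script): the file being
# validated holds one repo-relative path per line, in alphabetical order,
# for example:
#
#   docs/source/en/quicktour.md
#   src/transformers/models/bert/modeling_bert.py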
"""
Dutch national flag sort: a single-pass, in-place three-way partition of a
sequence containing only the values 0, 1 and 2.
"""
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence):
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
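# Hedged sanity check (not part of the original script): the expected
# three-way partition on a small input.
#
#   dutch_national_flag_sort([2, 0, 1, 2, 0, 1])  ->  [0, 0, 1, 1, 2, 2]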
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
_lowercase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"""
),
"""google/electra-base-generator""": """https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt""",
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"""
),
"""google/electra-base-generator""": (
"""https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"""
),
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/electra-small-generator": 512,
    "google/electra-base-generator": 512,
    "google/electra-large-generator": 512,
    "google/electra-small-discriminator": 512,
    "google/electra-base-discriminator": 512,
    "google/electra-large-discriminator": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""google/electra-small-generator""": {"""do_lower_case""": True},
"""google/electra-base-generator""": {"""do_lower_case""": True},
"""google/electra-large-generator""": {"""do_lower_case""": True},
"""google/electra-small-discriminator""": {"""do_lower_case""": True},
"""google/electra-base-discriminator""": {"""do_lower_case""": True},
"""google/electra-large-discriminator""": {"""do_lower_case""": True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Keep the backend normalizer in sync with the requested options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
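# Hedged usage sketch (not part of the original module): loading the fast
# tokenizer from one of the checkpoints listed above. Requires network access
# on first use.
#
#   tok = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#   enc = tok("hello world")
#   # enc["input_ids"] holds [CLS] + wordpiece ids + [SEP]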
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register a Formatter object using a name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """Register an unavailable Formatter object using a name and optional aliases."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")
if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a known alias, return its main type name; otherwise return it unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Factory function to get a Formatter given its type name and keyword arguments."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(t for t in _FORMAT_TYPES.keys() if t is not None)}, but got '{format_type}'"
        )
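# Hedged usage sketch (not part of the original module): resolving a formatter
# through the alias table built above. "np" is registered as an alias of
# "numpy", so both names return a NumpyFormatter instance.
if __name__ == "__main__":
    fmt = get_formatter("np")
    print(type(fmt).__name__)  # -> NumpyFormatter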
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    """Records the leaf modules of `module` that are executed during a forward pass."""

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [h.remove() for h in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda m: len(list(m.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    """Transfers the weights of `src` to `dest` by tracing both with a forward pass."""

    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
class FakeRegNetVisslWrapper(nn.Module):
    """Fake wrapper for a RegNet trunk that mimics what vissl does, without a config file."""

    def __init__(self, model: nn.Module):
        super().__init__()
        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))
        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )
class NameToFromModelFuncMap(dict):
    """A dict that falls back to loading the original model from timm when a name is missing."""

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)
        return val
class NameToOurModelFuncMap(dict):
    """A dict that returns the correct Hugging Face RegNet class reference for a name."""

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict
def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

        if from_state_dict is not None:
            keys = []
            # for seer - in1k finetuned we have to manually copy the head
            if "seer" in name and "in1k" in name:
                keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
            to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
            our_model.load_state_dict(to_state_dict)

        our_outputs = our_model(x, output_hidden_states=True)
        our_output = (
            our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
        )
        from_output = from_model(x)
        from_output = from_output[-1] if type(from_output) is list else from_output

        # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
        if "seer" in name and "in1k" in name:
            our_output = our_outputs.hidden_states[-1]

        assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )
        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        print(f"Pushed {name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
'regnet-x-002': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type='x' ),
'regnet-x-004': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type='x' ),
'regnet-x-006': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type='x' ),
'regnet-x-008': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type='x' ),
'regnet-x-016': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type='x' ),
'regnet-x-032': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type='x' ),
'regnet-x-040': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type='x' ),
'regnet-x-064': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type='x' ),
'regnet-x-080': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type='x' ),
'regnet-x-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type='x' ),
'regnet-x-160': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type='x' ),
'regnet-x-320': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type='x' ),
# y variant
'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
'regnet-y-004': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
'regnet-y-006': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
'regnet-y-008': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
'regnet-y-016': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
'regnet-y-032': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
'regnet-y-040': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
'regnet-y-064': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
'regnet-y-080': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
'regnet-y-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
'regnet-y-160': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
'regnet-y-320': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
'regnet-y-1280-seer': RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
'regnet-y-2560-seer': RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
'regnet-y-10b-seer': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
# finetuned on imagenet
'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()

    # add seer weights logic
    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]

    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )
    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )

    if model_name:
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            names_to_config[model_name],
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
    return config, expected_shape
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
lowercase_ = parser.parse_args()
lowercase_ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
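# Hedged usage sketch (not part of the original script): a typical invocation.
# The script filename and the output folder are illustrative only.
#
#   python convert_regnet_to_pytorch.py \
#       --model_name regnet-y-040 \
#       --pytorch_dump_folder_path ./regnet-dump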
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config(yolos_name):
    config = YolosConfig()
    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]
    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def read_in_q_k_v(state_dict, config, base_model=False):
    # Destination key names follow the HF YOLOS naming used by `rename_key` below.
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name):
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
if yolos_name == "yolos_ti":
__lowercase : Optional[int] = torch.tensor(
[[-39.5_022, -11.9_820, -17.6_888], [-29.9_574, -9.9_769, -17.7_691], [-42.3_281, -20.7_200, -30.6_294]] )
__lowercase : Dict = torch.tensor(
[[0.4_021, 0.0_836, 0.7_979], [0.0_184, 0.2_609, 0.0_364], [0.1_781, 0.2_004, 0.2_095]] )
elif yolos_name == "yolos_s_200_pre":
__lowercase : Union[str, Any] = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] )
__lowercase : Tuple = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] )
elif yolos_name == "yolos_s_300_pre":
__lowercase : Union[str, Any] = torch.tensor(
[[-36.2_220, -14.4_385, -23.5_457], [-35.6_970, -14.7_583, -21.3_935], [-31.5_939, -13.6_042, -16.8_049]] )
__lowercase : List[str] = torch.tensor(
[[0.7_614, 0.2_316, 0.4_728], [0.7_168, 0.4_495, 0.3_855], [0.4_996, 0.1_466, 0.9_996]] )
elif yolos_name == "yolos_s_dWr":
__lowercase : Dict = torch.tensor(
[[-42.8_668, -24.1_049, -41.1_690], [-34.7_456, -14.1_274, -24.9_194], [-33.7_898, -12.1_946, -25.6_495]] )
__lowercase : str = torch.tensor(
[[0.5_587, 0.2_773, 0.0_605], [0.5_004, 0.3_014, 0.9_994], [0.4_999, 0.1_548, 0.9_994]] )
elif yolos_name == "yolos_base":
__lowercase : int = torch.tensor(
[[-40.6_064, -24.3_084, -32.6_447], [-55.1_990, -30.7_719, -35.5_877], [-51.4_311, -33.3_507, -35.6_462]] )
__lowercase : Optional[Any] = torch.tensor(
[[0.5_555, 0.2_794, 0.0_655], [0.9_049, 0.2_664, 0.1_894], [0.9_183, 0.1_984, 0.1_635]] )
else:
raise ValueError(f"""Unknown yolos_name: {yolos_name}""" )
    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--yolos_name',
default='yolos_s_200_pre',
type=str,
help=(
'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','
' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'
),
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
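# Example invocation, assuming this script is saved as convert_yolos_to_pytorch.py (paths are placeholders):
#   python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
#       --checkpoint_path /path/to/yolos_s_200_pre.pth \
#       --pytorch_dump_folder_path /path/to/output_dir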
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                loga = model[f"{prefix_}mask_scores"]
                # hard-concrete (L0) gate: stretch sigmoid(log_alpha) from (0, 1) to (l, r), then clamp to [0, 1]
                l, r = -0.1, 1.1
                s = torch.sigmoid(loga)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
            '''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model. '''
            '''For `sigmoied_threshold`, it is the threshold tau against which the (sigmoied) scores are compared. '''
            '''Not needed for `l0`.'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
        help='''Folder to which the pruned model will be saved (defaults to bertarized_<model_name_or_path> next to the input folder)''',
)
    args = parser.parse_args()
main(args)
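# Example invocation, assuming this script is saved as bertarize.py (the model path is a placeholder):
#   python bertarize.py --pruning_method topK --threshold 0.10 \
#       --model_name_or_path /path/to/fine_pruned_model
# Per the --threshold help above, 0.10 with `topK` keeps the 10% of weights with the highest movement scores.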
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-large-lv60''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/s2t-small-mustc-en-fr-st''',
type=str,
help='''Path to hf decoder s2t checkpoint config''',
)
    parser.add_argument('''--vocab_size''', default=10224, type=int, help='''Vocab size of decoder''')
parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
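# Example invocation, assuming this script is saved as convert_wav2vec2_seq2seq_checkpoint.py
# (paths are placeholders; fairseq resolves the data dir from the directory containing --dict_path):
#   python convert_wav2vec2_seq2seq_checkpoint.py --checkpoint_path /path/to/checkpoint.pt \
#       --pytorch_dump_folder_path /path/to/output_dir --dict_path /path/to/dict.ltr.txt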
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)
    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
    def test_checkpoint_lookup(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)
    def test_from_environment(self):
        # Framework not provided, TF not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
_DESCRIPTION = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
_KWARGS_DESCRIPTION = '''
Calculates how good the predictions are given some reference tests, using the pass@k metric
Args:
    predictions: list of candidates to evaluate. Each prediction should be a list
        of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the candidate programs (Default: 4).
    timeout: maximum time in seconds each candidate program is allowed to run (Default: 3.0).
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
lowerCamelCase__ = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
_LICENSE = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric):
"""simple docstring"""
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/openai/human-eval''' , codebase_urls=['''https://github.com/openai/human-eval'''] , reference_urls=['''https://github.com/openai/human-eval'''] , license=_LICENSE , )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the pass@k scores and the granular results of each test."""
        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
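# Worked example for the estimator above: with n = 2 samples and c = 1 correct, pass@1 is
#   1 - C(n - c, 1) / C(n, 1) = 1 - C(1, 1) / C(2, 1) = 1 - 1/2 = 0.5,
# and pass@2 is 1.0 since n - c = 1 < k = 2; this matches the {'pass@1': 0.5, 'pass@2': 1.0}
# output shown in the docstring example. The running product in `estimator` is a numerically
# stable form of 1 - C(n - c, k) / C(n, k).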
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=input_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"

        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]

        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]

        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        max_relative_position=32,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)
def __A ( self : Union[str, Any] ) -> Any:
self.config_tester.run_common_tests()
def __A ( self : int ) -> Any:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def __A ( self : List[str] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__magic_name__ )
def __A ( self : str ) -> Optional[Any]:
# This regression test was failing with PyTorch < 1.3
(
(
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) ,
) = self.model_tester.prepare_config_and_inputs_for_decoder()
SCREAMING_SNAKE_CASE_ = None
self.model_tester.create_and_check_model_as_decoder(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , )
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@slow
@require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
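# For reference, the TorchScript test above follows a pattern that is not
# Nezha-specific; a minimal sketch of the same trace/save/reload round trip
# (tensor and file names here are illustrative assumptions):
#
#   config.torchscript = True
#   traced = torch.jit.trace(model, (input_ids.to("cpu"), attention_mask.to("cpu")))
#   torch.jit.save(traced, "model.pt")
#   loaded = torch.jit.load("model.pt", map_location="cuda")
#   loaded(input_ids.to("cuda"), attention_mask.to("cuda"))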
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class HfUtilsTester(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)
    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)
@mockenv(TRANSFORMERS_VERBOSITY="error" )
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="super-error" )
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed
    def test_advisory_warnings(self):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")
def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
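# For context, a minimal sketch of how user code would drive the knobs these
# tests exercise; `set_verbosity_error`, `disable_progress_bar`, and the
# TRANSFORMERS_VERBOSITY env var are the public APIs tested above:
#
#   from transformers.utils import logging
#   logging.set_verbosity_error()    # silence library-wide warnings
#   logging.disable_progress_bar()   # hide download progress bars
#   # or, before the process starts: TRANSFORMERS_VERBOSITY=error python script.py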
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
def __init__( self , _A , _A=1_3 , _A=6_4 , _A=3 , _A=4 , _A=[2, 2, 2, 2] , _A=[8, 4, 2, 1] , _A=[1_6, 3_2, 6_4, 1_2_8] , _A=[1, 4, 8, 1_6] , _A=[1, 2, 4, 8] , _A=True , _A=True , _A="gelu" , _A=0.1 , _A=0.1 , _A=0.02 , _A=3 , _A=None , ):
'''simple docstring'''
UpperCamelCase : Any = parent
UpperCamelCase : List[Any] = batch_size
UpperCamelCase : Any = image_size
UpperCamelCase : Dict = num_channels
UpperCamelCase : List[Any] = num_encoder_blocks
UpperCamelCase : List[str] = sr_ratios
UpperCamelCase : Union[str, Any] = depths
UpperCamelCase : List[Any] = hidden_sizes
UpperCamelCase : Optional[Any] = downsampling_rates
UpperCamelCase : str = num_attention_heads
UpperCamelCase : Union[str, Any] = is_training
UpperCamelCase : Dict = use_labels
UpperCamelCase : Tuple = hidden_act
UpperCamelCase : Optional[Any] = hidden_dropout_prob
UpperCamelCase : Dict = attention_probs_dropout_prob
UpperCamelCase : Any = initializer_range
UpperCamelCase : Dict = num_labels
UpperCamelCase : List[Any] = scope
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase : Union[str, Any] = None
if self.use_labels:
UpperCamelCase : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCamelCase : Optional[int] = self.get_config()
return config, pixel_values, labels
def _a ( self ):
'''simple docstring'''
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def _a ( self , _A , _A , _A ):
'''simple docstring'''
UpperCamelCase : Tuple = SegformerModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def _a ( self , _A , _A , _A ):
'''simple docstring'''
UpperCamelCase : Tuple = self.num_labels
UpperCamelCase : Any = SegformerForSemanticSegmentation(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
UpperCamelCase : List[Any] = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def _a ( self , _A , _A , _A ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = 1
UpperCamelCase : Any = SegformerForSemanticSegmentation(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase : Optional[int] = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(_SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertGreater(result.loss , 0.0 )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": SegformerModel,
"""image-classification""": SegformerForImageClassification,
"""image-segmentation""": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)
def _a ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self ):
'''simple docstring'''
UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*_SCREAMING_SNAKE_CASE )
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*_SCREAMING_SNAKE_CASE )
@unittest.skip("""SegFormer does not use inputs_embeds""" )
def _a ( self ):
'''simple docstring'''
pass
@unittest.skip("""SegFormer does not have get_input_embeddings method and get_output_embeddings methods""" )
def _a ( self ):
'''simple docstring'''
pass
def _a ( self ):
'''simple docstring'''
UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : List[Any] = model_class(_SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase : Tuple = [*signature.parameters.keys()]
UpperCamelCase : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase : Dict = True
for model_class in self.all_model_classes:
UpperCamelCase : Any = True
UpperCamelCase : Any = False
UpperCamelCase : Tuple = True
UpperCamelCase : str = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
UpperCamelCase : int = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
UpperCamelCase : Tuple = outputs.attentions
UpperCamelCase : Dict = sum(self.model_tester.depths )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCamelCase : Union[str, Any] = True
UpperCamelCase : List[str] = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
UpperCamelCase : int = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
UpperCamelCase : Optional[Any] = outputs.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
# verify the first attentions (first block, first layer)
UpperCamelCase : Union[str, Any] = (self.model_tester.image_size // 4) ** 2
UpperCamelCase : str = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
UpperCamelCase : Dict = (self.model_tester.image_size // 3_2) ** 2
UpperCamelCase : Dict = (self.model_tester.image_size // (3_2 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
UpperCamelCase : Optional[int] = len(_SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
UpperCamelCase : Optional[Any] = True
UpperCamelCase : Union[str, Any] = True
UpperCamelCase : str = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
UpperCamelCase : Any = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(out_len + 1 , len(_SCREAMING_SNAKE_CASE ) )
UpperCamelCase : Optional[Any] = outputs.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
# verify the first attentions (first block, first layer)
UpperCamelCase : str = (self.model_tester.image_size // 4) ** 2
UpperCamelCase : Tuple = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def _a ( self ):
'''simple docstring'''
def check_hidden_states_output(_A , _A , _A ):
UpperCamelCase : Union[str, Any] = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
UpperCamelCase : List[str] = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
UpperCamelCase : Union[str, Any] = outputs.hidden_states
UpperCamelCase : List[Any] = self.model_tester.num_encoder_blocks
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : int = True
check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase : int = True
check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _a ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase : Dict = True
for model_class in self.all_model_classes:
if model_class in get_values(_SCREAMING_SNAKE_CASE ):
continue
UpperCamelCase : Union[str, Any] = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.train()
UpperCamelCase : Union[str, Any] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
UpperCamelCase : int = model(**_SCREAMING_SNAKE_CASE ).loss
loss.backward()
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a ( self ):
'''simple docstring'''
pass
@slow
def _a ( self ):
'''simple docstring'''
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : int = SegformerModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def UpperCamelCase ():
UpperCamelCase : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class lowercase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Any = SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2) , keep_ratio=_SCREAMING_SNAKE_CASE , align=_SCREAMING_SNAKE_CASE , do_random_crop=_SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
_SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = prepare_img()
UpperCamelCase : List[Any] = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
UpperCamelCase : Any = encoded_inputs.pixel_values.to(_SCREAMING_SNAKE_CASE )
with torch.no_grad():
UpperCamelCase : Any = model(_SCREAMING_SNAKE_CASE )
UpperCamelCase : str = torch.Size((1, model.config.num_labels, 1_2_8, 1_2_8) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = torch.tensor(
[
[[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]],
[[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]],
[[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]],
] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
@slow
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2) , keep_ratio=_SCREAMING_SNAKE_CASE , align=_SCREAMING_SNAKE_CASE , do_random_crop=_SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = SegformerForSemanticSegmentation.from_pretrained(
"""nvidia/segformer-b1-finetuned-cityscapes-1024-1024""" ).to(_SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = prepare_img()
UpperCamelCase : Optional[int] = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
UpperCamelCase : Optional[Any] = encoded_inputs.pixel_values.to(_SCREAMING_SNAKE_CASE )
with torch.no_grad():
UpperCamelCase : str = model(_SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = torch.Size((1, model.config.num_labels, 1_2_8, 1_2_8) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = torch.tensor(
[
[[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]],
[[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]],
[[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]],
] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-1 ) )
@slow
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Optional[int] = SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2) , keep_ratio=_SCREAMING_SNAKE_CASE , align=_SCREAMING_SNAKE_CASE , do_random_crop=_SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
_SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = prepare_img()
UpperCamelCase : Optional[Any] = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
UpperCamelCase : str = encoded_inputs.pixel_values.to(_SCREAMING_SNAKE_CASE )
with torch.no_grad():
UpperCamelCase : Tuple = model(_SCREAMING_SNAKE_CASE )
UpperCamelCase : int = outputs.logits.detach().cpu()
UpperCamelCase : Tuple = image_processor.post_process_semantic_segmentation(outputs=_SCREAMING_SNAKE_CASE , target_sizes=[(5_0_0, 3_0_0)] )
UpperCamelCase : int = torch.Size((5_0_0, 3_0_0) )
self.assertEqual(segmentation[0].shape , _SCREAMING_SNAKE_CASE )
UpperCamelCase : int = image_processor.post_process_semantic_segmentation(outputs=_SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = torch.Size((1_2_8, 1_2_8) )
self.assertEqual(segmentation[0].shape , _SCREAMING_SNAKE_CASE )
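# Condensed sketch of the inference pattern the integration tests above
# exercise (the checkpoint name matches the tests; `image` is an assumed
# PIL.Image input):
#
#   processor = SegformerImageProcessor()
#   model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
#   inputs = processor(images=image, return_tensors="pt")
#   with torch.no_grad():
#       outputs = model(**inputs)
#   seg_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]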
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
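# The deprecation warning above points at the replacement API; a minimal
# sketch, with `audio_array` and `transcript` as assumed inputs:
#
#   inputs = processor(audio=audio_array, sampling_rate=16_000, text=transcript)
#   # `inputs` carries the feature extractor's features plus `labels` from the
#   # tokenizer, replacing the `as_target_processor` context manager.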
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol^-1 K^-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
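# Worked example (values are illustrative): 2 mol of an ideal gas at 300 K in a
# 0.02 m^3 vessel exerts P = nRT / V = 2 * 8.314462 * 300 / 0.02 ≈ 249_433.86 Pa.
#
#   print(pressure_of_gas_system(2, 300, 0.02))    # ~249433.86
#   print(volume_of_gas_system(2, 300, 100_000))   # ~0.0499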
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class lowerCamelCase__( __lowerCamelCase):
@slow
@require_torch
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
__lowerCamelCase = BertTokenizer.from_pretrained("""bert-base-uncased""" )
__lowerCamelCase = bertabert.config.encoder.vocab_size
__lowerCamelCase = tokenizer.sep_token_id
__lowerCamelCase = tokenizer.cls_token_id
__lowerCamelCase = 1_28
__lowerCamelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
__lowerCamelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
__lowerCamelCase = train_dataset.select(range(32 ) )
__lowerCamelCase = val_dataset.select(range(16 ) )
__lowerCamelCase = 4
def _map_to_encoder_decoder_inputs(UpperCamelCase_: List[Any] ):
# Tokenizer will automatically set [BOS] <text> [EOS]
__lowerCamelCase = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=UpperCamelCase_ , max_length=5_12 )
__lowerCamelCase = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=UpperCamelCase_ , max_length=1_28 )
__lowerCamelCase = inputs.input_ids
__lowerCamelCase = inputs.attention_mask
__lowerCamelCase = outputs.input_ids
__lowerCamelCase = outputs.input_ids.copy()
__lowerCamelCase = [
[-1_00 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
__lowerCamelCase = outputs.attention_mask
assert all(len(UpperCamelCase_ ) == 5_12 for x in inputs.input_ids )
assert all(len(UpperCamelCase_ ) == 1_28 for x in outputs.input_ids )
return batch
def _compute_metrics(UpperCamelCase_: int ):
__lowerCamelCase = pred.label_ids
__lowerCamelCase = pred.predictions
# all unnecessary tokens are removed
__lowerCamelCase = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
__lowerCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCamelCase_ ) )] ) / len(UpperCamelCase_ )
return {"accuracy": accuracy}
# map train dataset
__lowerCamelCase = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCamelCase_ , batch_size=UpperCamelCase_ , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
__lowerCamelCase = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCamelCase_ , batch_size=UpperCamelCase_ , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
__lowerCamelCase = self.get_auto_remove_tmp_dir()
__lowerCamelCase = SeqaSeqTrainingArguments(
output_dir=UpperCamelCase_ , per_device_train_batch_size=UpperCamelCase_ , per_device_eval_batch_size=UpperCamelCase_ , predict_with_generate=UpperCamelCase_ , evaluation_strategy="""steps""" , do_train=UpperCamelCase_ , do_eval=UpperCamelCase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
__lowerCamelCase = SeqaSeqTrainer(
model=UpperCamelCase_ , args=UpperCamelCase_ , compute_metrics=_compute_metrics , train_dataset=UpperCamelCase_ , eval_dataset=UpperCamelCase_ , tokenizer=UpperCamelCase_ , )
# start training
trainer.train()
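# For context, warm-starting an encoder-decoder this way also requires wiring
# the special-token ids before training, as the test does above; a hedged
# sketch of the same setup outside the test harness:
#
#   model = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
#   model.config.decoder_start_token_id = tokenizer.cls_token_id
#   model.config.eos_token_id = tokenizer.sep_token_id
#   model.config.pad_token_id = tokenizer.pad_token_id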
from ...configuration_utils import PretrainedConfig
lowerCamelCase__ = {
"""google/tapas-base-finetuned-sqa""": (
"""https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wtq""": (
"""https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wikisql-supervised""": (
"""https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-tabfact""": (
"""https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"""
),
}
class TapasConfig(PretrainedConfig):
    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
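# Usage sketch: the normalization above means string keys (as produced by JSON
# config files) and int keys are interchangeable; label values are illustrative.
#
#   config = TapasConfig(aggregation_labels={"0": "NONE", "1": "SUM", "2": "AVERAGE", "3": "COUNT"})
#   assert config.aggregation_labels[0] == "NONE"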
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
lowerCamelCase__ = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
def __init__( self : Optional[Any] , __lowercase : Dict , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : Optional[Any]=None ):
'''simple docstring'''
super().__init__(
__lowercase , question_encoder_tokenizer=__lowercase , generator_tokenizer=__lowercase , index=__lowercase , init_retrieval=__lowercase , )
__a = None
def UpperCamelCase_ ( self : List[Any] , __lowercase : int ):
'''simple docstring'''
logger.info("""initializing retrieval""" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("""dist initialized""" )
# needs to be set manually
__a = self._infer_socket_ifname()
# avoid clash with the NCCL port
__a = str(distributed_port + 1 )
__a = dist.new_group(ranks=__lowercase , backend="""gloo""" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("""dist not initialized / main""" )
self.index.init_index()
# all processes wait untill the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return dist.get_rank(group=self.process_group ) == 0
def UpperCamelCase_ ( self : int , __lowercase : List[str] , __lowercase : int , __lowercase : List[str]=torch.floataa ):
'''simple docstring'''
__a = torch.empty(__lowercase , dtype=__lowercase )
dist.scatter(__lowercase , src=0 , scatter_list=__lowercase , group=self.process_group )
return target_tensor
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
__a = next((addr for addr in addrs if addr.startswith("""e""" )) , __lowercase )
return ifname
def UpperCamelCase_ ( self : int , __lowercase : np.ndarray , __lowercase : int ):
'''simple docstring'''
# single GPU training
if not dist.is_initialized():
__a , __a = self._main_retrieve(__lowercase , __lowercase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__lowercase )
# distributed training
__a = dist.get_world_size(group=self.process_group )
# gather logic
__a = None
if self._is_main():
__a = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(__lowercase )]
dist.gather(torch.tensor(__lowercase ) , dst=0 , gather_list=__lowercase , group=self.process_group )
# scatter logic
__a = question_hidden_states.shape[0]
__a = []
__a = []
if self._is_main():
assert len(__lowercase ) == world_size
__a , __a = self._main_retrieve(torch.cat(__lowercase ).numpy() , __lowercase )
__a , __a = torch.tensor(__lowercase ), torch.tensor(__lowercase )
__a = self._chunk_tensor(__lowercase , __lowercase )
__a = self._chunk_tensor(__lowercase , __lowercase )
__a = self._scattered(__lowercase , [n_queries, n_docs] , target_type=torch.intaa )
__a = self._scattered(__lowercase , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__lowercase )
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , a_ , a_ , a_ ):
'''simple docstring'''
super().__init__()
self.register_modules(vqvae=a_ , unet=a_ , scheduler=a_ )
@torch.no_grad()
def __call__(self , a_ = 1 , a_ = None , a_ = 0.0 , a_ = 50 , a_ = "pil" , a_ = True , **a_ , ):
'''simple docstring'''
__snake_case : Dict = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=a_ , )
__snake_case : Tuple = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__snake_case : Optional[Any] = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(a_ )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
__snake_case : Dict = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__snake_case : List[Any] = {}
if accepts_eta:
__snake_case : Dict = eta
for t in self.progress_bar(self.scheduler.timesteps ):
__snake_case : Optional[Any] = self.scheduler.scale_model_input(a_ , a_ )
# predict the noise residual
__snake_case : Tuple = self.unet(a_ , a_ ).sample
# compute the previous noisy sample x_t -> x_t-1
__snake_case : int = self.scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample
# decode the image latents with the VAE
__snake_case : Union[str, Any] = self.vqvae.decode(a_ ).sample
__snake_case : str = (image / 2 + 0.5).clamp(0 , 1 )
__snake_case : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__snake_case : Union[str, Any] = self.numpy_to_pil(a_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a_ )
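# This is an unconditional latent-diffusion sampler (VQ-VAE decoder + UNet +
# DDIM scheduler); it matches diffusers' public `LDMPipeline`. Typical usage,
# assuming that mapping and the CompVis checkpoint:
#
#   from diffusers import LDMPipeline
#   pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#   image = pipe(num_inference_steps=50).images[0]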
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
__snake_case : List[str] = FlaxDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=a_ , cache_dir=a_ )
__snake_case : Optional[Any] = [t[-1] for t in os.walk(os.path.join(a_ , os.listdir(a_ )[0] , '''snapshots''' ) )]
__snake_case : Any = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('''.bin''' ) for f in files )
@slow
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=a_ )
__snake_case : Union[str, Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__snake_case : Union[str, Any] = jax.random.PRNGKey(0 )
__snake_case : Tuple = 4
__snake_case : Any = jax.device_count()
__snake_case : Any = num_samples * [prompt]
__snake_case : Optional[int] = pipeline.prepare_inputs(a_ )
# shard inputs and rng
__snake_case : Optional[int] = replicate(a_ )
__snake_case : List[str] = jax.random.split(a_ , a_ )
__snake_case : List[str] = shard(a_ )
__snake_case : int = pipeline(a_ , a_ , a_ , a_ , jit=a_ ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.151_4745 ) < 1E-3
assert np.abs(np.abs(a_ , dtype=np.floataa ).sum() - 4_9947.875 ) < 5E-1
__snake_case : int = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(a_ ) == num_samples
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''flax''' , safety_checker=a_ )
__snake_case : Union[str, Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__snake_case : Dict = jax.random.PRNGKey(0 )
__snake_case : Union[str, Any] = 50
__snake_case : Optional[Any] = jax.device_count()
__snake_case : List[Any] = num_samples * [prompt]
__snake_case : Tuple = pipeline.prepare_inputs(a_ )
# shard inputs and rng
__snake_case : Tuple = replicate(a_ )
__snake_case : Tuple = jax.random.split(a_ , a_ )
__snake_case : Any = shard(a_ )
__snake_case : Optional[int] = pipeline(a_ , a_ , a_ , a_ , jit=a_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0565_2401) ) < 1E-3
assert np.abs((np.abs(a_ , dtype=np.floataa ).sum() - 238_3808.2) ) < 5E-1
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=a_ )
__snake_case : Any = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__snake_case : Optional[int] = jax.random.PRNGKey(0 )
__snake_case : Optional[Any] = 50
__snake_case : Any = jax.device_count()
__snake_case : Optional[Any] = num_samples * [prompt]
__snake_case : Any = pipeline.prepare_inputs(a_ )
# shard inputs and rng
__snake_case : Optional[int] = replicate(a_ )
__snake_case : int = jax.random.split(a_ , a_ )
__snake_case : int = shard(a_ )
__snake_case : List[Any] = pipeline(a_ , a_ , a_ , a_ , jit=a_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0400_3906) ) < 1E-3
assert np.abs((np.abs(a_ , dtype=np.floataa ).sum() - 237_3516.75) ) < 5E-1
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa )
__snake_case : Dict = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__snake_case : List[Any] = jax.random.PRNGKey(0 )
__snake_case : Tuple = 50
__snake_case : Dict = jax.device_count()
__snake_case : Any = num_samples * [prompt]
__snake_case : List[Any] = pipeline.prepare_inputs(a_ )
# shard inputs and rng
__snake_case : Dict = replicate(a_ )
__snake_case : str = jax.random.split(a_ , a_ )
__snake_case : Dict = shard(a_ )
__snake_case : Any = pipeline(a_ , a_ , a_ , a_ , jit=a_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0400_3906) ) < 1E-3
assert np.abs((np.abs(a_ , dtype=np.floataa ).sum() - 237_3516.75) ) < 5E-1
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = FlaxDDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , set_alpha_to_one=a_ , steps_offset=1 , )
__snake_case , __snake_case : Any = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , scheduler=a_ , safety_checker=a_ , )
__snake_case : int = scheduler.create_state()
__snake_case : Optional[int] = scheduler_state
__snake_case : str = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__snake_case : Tuple = jax.random.PRNGKey(0 )
__snake_case : List[str] = 50
__snake_case : Optional[int] = jax.device_count()
__snake_case : Optional[Any] = num_samples * [prompt]
__snake_case : Union[str, Any] = pipeline.prepare_inputs(a_ )
# shard inputs and rng
__snake_case : Optional[Any] = replicate(a_ )
__snake_case : str = jax.random.split(a_ , a_ )
__snake_case : int = shard(a_ )
__snake_case : Optional[Any] = pipeline(a_ , a_ , a_ , a_ , jit=a_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4504_3945) ) < 1E-3
assert np.abs((np.abs(a_ , dtype=np.floataa ).sum() - 234_7693.5) ) < 5E-1
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__snake_case : Dict = jax.device_count()
__snake_case : Any = num_samples * [prompt]
__snake_case : List[str] = jax.random.split(jax.random.PRNGKey(0 ) , a_ )
__snake_case , __snake_case : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=a_ , )
__snake_case : Tuple = replicate(a_ )
__snake_case : Dict = pipeline.prepare_inputs(a_ )
__snake_case : Union[str, Any] = shard(a_ )
__snake_case : str = pipeline(a_ , a_ , a_ , jit=a_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
__snake_case : Tuple = images[2, 0, 2_56, 10:17, 1]
# With memory efficient attention
__snake_case , __snake_case : Dict = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=a_ , use_memory_efficient_attention=a_ , )
__snake_case : Tuple = replicate(a_ )
__snake_case : Optional[Any] = pipeline.prepare_inputs(a_ )
__snake_case : Union[str, Any] = shard(a_ )
__snake_case : Tuple = pipeline(a_ , a_ , a_ , jit=a_ ).images
assert images_eff.shape == (num_samples, 1, 5_12, 5_12, 3)
__snake_case : Optional[int] = images[2, 0, 2_56, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
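# All of the tests above repeat one data-parallel recipe; a condensed sketch
# (`prompts` is an assumed list of strings, the helpers are the flax/jax APIs
# imported at the top of this file):
#
#   params = replicate(params)                               # copy weights to every device
#   rng = jax.random.split(jax.random.PRNGKey(0), jax.device_count())
#   prompt_ids = shard(pipeline.prepare_inputs(prompts))     # split the batch across devices
#   images = pipeline(prompt_ids, params, rng, num_inference_steps, jit=True).images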
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        # Sift the value at `start` down until the min-heap property holds.
        if start > size // 2 - 1:
            return
        if 2 * start + 2 >= size:
            smallest_child = 2 * start + 1
        elif heap[2 * start + 1] < heap[2 * start + 2]:
            smallest_child = 2 * start + 1
        else:
            smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, temp1 = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, temp1

            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start])
            )
            self.set_position(positions[start], temp)

            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        # Bubble `val` up from `index` towards the root.
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
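# Example session (graph is illustrative): a triangle 0-1 (w=1), 1-2 (w=2),
# 0-2 (w=3) yields the MST edges (0, 1) and (1, 2).
#
#   g = defaultdict(list)
#   for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
#       g[u].append([v, w])
#       g[v].append([u, w])
#   print(prisms_algorithm(g))   # [(0, 1), (1, 2)]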
"""simple docstring"""
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list: list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
print(alternative_string_arrange('AB', 'XYZ'), end=' ')
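# Expected behaviour (illustrative): characters are interleaved and the longer
# string's tail is appended.
#
#   alternative_string_arrange("AB", "XYZ")   # -> "AXBYZ"
#   alternative_string_arrange("ABCD", "XY")  # -> "AXBYCD"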
def hamming(n_element: int) -> list:
    """Return the first n_element numbers of the Hamming series 2^i * 3^j * 5^k."""
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list
if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
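# Sanity check (illustrative): the series begins 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ...
#
#   assert hamming(9) == [1, 2, 3, 4, 5, 6, 8, 9, 10]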
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class __snake_case ( a , a ):
@register_to_config
def __init__( self : List[Any] , _snake_case : int = 128 , _snake_case : int = 256 , _snake_case : float = 2_0_0_0.0 , _snake_case : int = 768 , _snake_case : int = 12 , _snake_case : int = 12 , _snake_case : int = 64 , _snake_case : int = 2048 , _snake_case : float = 0.1 , ):
"""simple docstring"""
super().__init__()
UpperCAmelCase_ = nn.Sequential(
nn.Linear(_snake_case , d_model * 4 , bias=_snake_case) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=_snake_case) , nn.SiLU() , )
UpperCAmelCase_ = nn.Embedding(_snake_case , _snake_case)
UpperCAmelCase_ = False
UpperCAmelCase_ = nn.Linear(_snake_case , _snake_case , bias=_snake_case)
UpperCAmelCase_ = nn.Dropout(p=_snake_case)
UpperCAmelCase_ = nn.ModuleList()
for lyr_num in range(_snake_case):
# FiLM conditional T5 decoder
UpperCAmelCase_ = DecoderLayer(d_model=_snake_case , d_kv=_snake_case , num_heads=_snake_case , d_ff=_snake_case , dropout_rate=_snake_case)
self.decoders.append(_snake_case)
UpperCAmelCase_ = TaLayerNorm(_snake_case)
UpperCAmelCase_ = nn.Dropout(p=_snake_case)
UpperCAmelCase_ = nn.Linear(_snake_case , _snake_case , bias=_snake_case)
def lowerCamelCase ( self : Optional[int] , _snake_case : str , _snake_case : List[str]):
"""simple docstring"""
UpperCAmelCase_ = torch.mul(query_input.unsqueeze(-1) , key_input.unsqueeze(-2))
return mask.unsqueeze(-3)
    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)
        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length = decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device), (batch, seq_length)
        )
        position_encodings = self.position_encoding(decoder_positions)
        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)
        # decoder: No padding present.
        decoder_mask = torch.ones(decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype)
        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]
        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)
        for lyr in self.decoders:
            y = lyr(y, conditioning_emb=conditioning_emb, encoder_hidden_states=encoded, encoder_attention_mask=encoder_decoder_mask)[0]
        y = self.decoder_norm(y)
        y = self.post_dropout(y)
        spec_out = self.spec_out(y)
        return spec_out
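# A minimal smoke-test sketch (not part of the original file): the
# `encodings_and_masks` argument is assumed here to be a list of
# (encoder_hidden_states, encoder_mask) pairs, matching how the forward pass
# above consumes it. Shapes are illustrative only.
#
#   decoder = T5FilmDecoder(input_dims=8, targets_length=16, d_model=32, num_layers=1, num_heads=2, d_kv=16, d_ff=64)
#   tokens = torch.randn(2, 16, 8)                       # (batch, targets_length, input_dims)
#   enc = [(torch.randn(2, 10, 32), torch.ones(2, 10))]  # one (encoding, mask) pair
#   spec = decoder(enc, tokens, torch.rand(2))           # -> (2, 16, 8)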
class DecoderLayer(nn.Module):
    def __init__(self, d_model: int, d_kv: int, num_heads: int, d_ff: int, dropout_rate: float, layer_norm_epsilon: float = 1e-6):
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )
        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon
            )
        )
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )
    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None):
        hidden_states = self.layer[0](hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask)
        if encoder_hidden_states is not None:
            # Convert the 0/1 key-padding mask into additive attention biases.
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )
            hidden_states = self.layer[1](hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask)
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)
        return (hidden_states,)
class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model: int, d_kv: int, num_heads: int, dropout_rate: float):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)
        # Self-attention block
        attention_output = self.attention(normed_hidden_states)
        hidden_states = hidden_states + self.dropout(attention_output)
        return hidden_states
class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model: int, d_kv: int, num_heads: int, dropout_rate: float, layer_norm_epsilon: float):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states, encoder_hidden_states=key_value_states, attention_mask=attention_mask.squeeze(1)
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class T5LayerFFCond(nn.Module):
    def __init__(self, d_model: int, d_ff: int, dropout_rate: float, layer_norm_epsilon: float):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)
        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model: int, d_ff: int, dropout_rate: float):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        # Gated-GELU feed-forward: one projection passes through the activation,
        # the other stays linear, and the two are multiplied elementwise.
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size: int, eps: float = 1e-6):
        """T5-style layer norm (RMSNorm): no mean subtraction and no bias."""
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # Always compute the variance in float32 for numerical stability.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # convert back into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        # Gaussian Error Linear Unit, tanh approximation:
        # 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class T5FiLMLayer(nn.Module):
    """Feature-wise Linear Modulation (FiLM): predicts a per-feature scale and
    shift from the conditioning embedding and applies them to the input."""

    def __init__(self, in_features: int, out_features: int):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
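# Sketch of the FiLM idea in isolation (illustrative shapes, not from the
# original file): a conditioning vector of width 4*d produces (scale, shift)
# pairs of width d each, broadcast over the sequence dimension.
#
#   film = T5FiLMLayer(in_features=128, out_features=32)
#   x = torch.randn(2, 16, 32)     # (batch, seq, d_model)
#   cond = torch.randn(2, 1, 128)  # (batch, 1, 4 * d_model)
#   out = film(x, cond)            # same shape as x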
| 169
| 1
|
"""simple docstring"""
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """BuilderConfig for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
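# Usage note (illustrative, not part of this module): with the `datasets`
# library this builder is normally reached through load_dataset, e.g.
#   from datasets import load_dataset
#   ds = load_dataset("audiofolder", data_dir="/path/to/folder")  # path is an example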
AUDIO_EXTENSIONS = [
'''.aiff''',
'''.au''',
'''.avr''',
'''.caf''',
'''.flac''',
'''.htk''',
'''.svx''',
'''.mat4''',
'''.mat5''',
'''.mpc2k''',
'''.ogg''',
'''.paf''',
'''.pvf''',
'''.raw''',
'''.rf64''',
'''.sd2''',
'''.sds''',
'''.ircam''',
'''.voc''',
'''.w64''',
'''.wav''',
'''.nist''',
'''.wavex''',
'''.wve''',
'''.xi''',
'''.mp3''',
'''.opus''',
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
| 572
|
"""simple docstring"""
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]]
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)
        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])
        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)
    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()
        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)
        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)
    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)
        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)
        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})
    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"
        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)
            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")
        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config
    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]]
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)
        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = GenerationConfig(do_sample=True, temperature=0.7, length_penalty=1.0)
        config.push_to_hub("test-generation-config", use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )
        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(do_sample=True, temperature=0.7, length_penalty=1.0)
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )
        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
| 572
| 1
|
import numpy as np
class Cell:
    """A cell in the grid world, holding its position, its parent and the A*
    scores g (cost so far), h (heuristic) and f (= g + h)."""

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)
class Gridworld:
    """A simple grid of world_size cells; w is the world map as a numpy array."""

    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neighbours(self, cell):
        """Return the neighbours of cell (8-connectivity, bounds-checked)."""
        neighbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours
def astar(world, start, goal):
    """A* search from start to goal on the given Gridworld; returns the path
    as a list of positions."""
    _open = []
    _closed = []
    _open.append(start)
    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neighbours(current):
            # Skip cells that were already expanded.
            if any(c == n for c in _closed):
                continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            # Skip if an equal cell with a lower f-score is already queued.
            if any(c == n and c.f < n.f for c in _open):
                continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
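# Expected behaviour for the 5x5 default grid above (a sanity sketch, not part
# of the original script): with 8-connectivity and unit step cost, the search
# should walk the diagonal, so the returned path is
#   [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]
# and the printed world map shows a diagonal of 1s.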
| 23
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
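# Usage note (illustrative, not part of the module): with the lazy structure
# above, `from transformers.models.gpt_bigcode import GPTBigCodeModel` only
# loads the heavy modeling file on first attribute access, which keeps a bare
# `import transformers` fast.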
| 23
| 1
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
    def test_is_control(self):
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual(
            [tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )
        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )
                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)
                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]
    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )
                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)
                self.assertEqual(input_dict, prepared_input_dict)
| 93
|
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False
        )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def A ( self ) -> Optional[Any]:
a_ : Optional[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
a_ : List[str] = self.dummy_cond_unet
a_ : int = PNDMScheduler(skip_prk_steps=_SCREAMING_SNAKE_CASE )
a_ : Any = self.dummy_vae
a_ : int = self.dummy_text_encoder
a_ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
a_ : str = StableDiffusionPipeline(
unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , vae=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=self.dummy_extractor , )
a_ : List[str] = sd_pipe.to(_SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
a_ : Dict = "A painting of a squirrel eating a burger"
a_ : Optional[int] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
a_ : Union[str, Any] = sd_pipe([prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
a_ : Union[str, Any] = output.images
a_ : Union[str, Any] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
a_ : Optional[int] = sd_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=_SCREAMING_SNAKE_CASE , )[0]
a_ : Any = image[0, -3:, -3:, -1]
a_ : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
a_ : Tuple = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)
        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images
        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def A ( self ) -> Optional[Any]:
a_ : List[Any] = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=_SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
a_ : List[str] = sd_pipe.to(_SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
a_ : str = (
"portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
" children from bahnhof zoo, detailed "
)
a_ : Optional[int] = 4_0_0_3_6_6_0_3_4_6
a_ : Optional[int] = 7
# without safety guidance (sld_guidance_scale = 0)
a_ : Tuple = torch.manual_seed(_SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = sd_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , num_inference_steps=5_0 , output_type="np" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
a_ : Any = output.images
a_ : Any = image[0, -3:, -3:, -1]
a_ : List[str] = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# without safety guidance (strong configuration)
a_ : List[str] = torch.manual_seed(_SCREAMING_SNAKE_CASE )
a_ : List[str] = sd_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , num_inference_steps=5_0 , output_type="np" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
a_ : List[str] = output.images
a_ : Union[str, Any] = image[0, -3:, -3:, -1]
a_ : List[Any] = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A ( self ) -> Dict:
a_ : Union[str, Any] = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=_SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
a_ : Dict = sd_pipe.to(_SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
a_ : Tuple = "padme amidala taking a bath artwork, safe for work, no nudity"
a_ : List[Any] = 2_7_3_4_9_7_1_7_5_5
a_ : Tuple = 7
a_ : Dict = torch.manual_seed(_SCREAMING_SNAKE_CASE )
a_ : Dict = sd_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , num_inference_steps=5_0 , output_type="np" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
a_ : str = output.images
a_ : Optional[Any] = image[0, -3:, -3:, -1]
a_ : Optional[int] = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
a_ : Any = torch.manual_seed(_SCREAMING_SNAKE_CASE )
a_ : str = sd_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , num_inference_steps=5_0 , output_type="np" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
a_ : str = output.images
a_ : Optional[Any] = image[0, -3:, -3:, -1]
a_ : Tuple = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A ( self ) -> int:
a_ : Optional[Any] = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" )
a_ : Dict = sd_pipe.to(_SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
a_ : Tuple = (
"the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
" leyendecker"
)
a_ : List[str] = 1_0_4_4_3_5_5_2_3_4
a_ : Dict = 1_2
a_ : List[Any] = torch.manual_seed(_SCREAMING_SNAKE_CASE )
a_ : Tuple = sd_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , num_inference_steps=5_0 , output_type="np" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
a_ : Any = output.images
a_ : List[str] = image[0, -3:, -3:, -1]
a_ : Tuple = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
a_ : Optional[Any] = torch.manual_seed(_SCREAMING_SNAKE_CASE )
a_ : Optional[int] = sd_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , num_inference_steps=5_0 , output_type="np" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
a_ : int = output.images
a_ : Union[str, Any] = image[0, -3:, -3:, -1]
a_ : Tuple = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 473
| 0
|
"""simple docstring"""
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"
    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )
    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """Run the same tests as above, but relying on spacy/ftfy-based preprocessing."""

    pass
| 711
|
"""simple docstring"""
def power(base: int, exponent: int) -> float:
    """Raise base to a non-negative exponent using recursion."""
    return base * power(base, exponent - 1) if exponent else 1
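# Quick check of the recursion above (the base case returns 1 when exponent == 0):
#   power(3, 4) == 81
#   power(2, 0) == 1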
if __name__ == "__main__":
print('Raise base to the power of exponent using recursion...')
    base = int(input('Enter the base: ').strip())
    exponent = int(input('Enter the exponent: ').strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
print(f'''{base} to the power of {exponent} is {result}''')
| 118
| 0
|
"""simple docstring"""
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count set bits by repeatedly clearing the lowest set bit."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count set bits by checking the parity of each bit via % 2."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
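# Both implementations agree; for example 25 == 0b11001 has three set bits:
#   get_set_bits_count_using_brian_kernighans_algorithm(25) == 3
#   get_set_bits_count_using_modulo_operator(25) == 3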
def benchmark() -> None:
    """Benchmark the two popcount implementations above on a few inputs."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})", setup=setup
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 680
|
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137
def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    # Equation parameters
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
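# Illustrative usage (the coordinates are examples and the expected value is
# only approximate -- roughly the surface distance in metres):
#   SAN_FRANCISCO = (37.774856, -122.424227)
#   YOSEMITE = (37.864742, -119.537521)
#   lamberts_ellipsoidal_distance(*SAN_FRANCISCO, *YOSEMITE)  # ~254 km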
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_blip_2': [
'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Blip2Config',
'Blip2QFormerConfig',
'Blip2VisionConfig',
],
'processing_blip_2': ['Blip2Processor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip_2"] = [
        'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Blip2Model',
        'Blip2QFormerModel',
        'Blip2PreTrainedModel',
        'Blip2ForConditionalGeneration',
        'Blip2VisionModel',
    ]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 572
|
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images
        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_versatile_diffusion_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 572
| 1
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default='summarization', metadata={'include_in_asdict_even_if_is_default': True})
    input_schema: ClassVar[Features] = Features({'text': Value('string')})
    label_schema: ClassVar[Features] = Features({'summary': Value('string')})
    text_column: str = 'text'
    summary_column: str = 'summary'

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: 'text', self.summary_column: 'summary'}
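# A minimal, self-contained sketch of how the column mapping is consumed; the column
# names "article" and "highlights" are hypothetical, purely for illustration:
if __name__ == "__main__":
    template = Summarization(text_column="article", summary_column="highlights")
    # `column_mapping` tells dataset preparation how to rename user columns to the
    # canonical "text"/"summary" schema declared above.
    assert template.column_mapping == {"article": "text", "highlights": "summary"}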
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
'''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
'''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
),
'''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = 'bert'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type='absolute',
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == 'multiple-choice':
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ]
        )
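# A quick self-contained sanity check of the defaults above; constructing a config is
# pure Python and never downloads weights. The `BertOnnxConfig(config)` call assumes
# the constructor inherited from `OnnxConfig`, whose default task is "default", so the
# generic batch/sequence axes are returned:
if __name__ == "__main__":
    config = BertConfig()
    assert config.hidden_size == 768 and config.num_attention_heads == 12
    onnx_config = BertOnnxConfig(config)
    assert onnx_config.inputs["input_ids"] == {0: "batch", 1: "sequence"}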
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels
    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, out_indices=self.out_indices)
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2))
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': BeitModel,
            'image-classification': BeitForImageClassification,
            'image-segmentation': BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='BEiT does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224') if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k').to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='pt').pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]).to(torch_device)

        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224').to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k').to(
            torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640')
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset('hf-internal-testing/fixtures_ade20k', split='test')
        image = Image.open(ds[0]['file'])
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse('9.0.0')

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640')
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset('hf-internal-testing/fixtures_ade20k', split='test')
        image = Image.open(ds[0]['file'])
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
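# A minimal inference sketch mirroring the integration tests above; it assumes network
# access to the "microsoft/beit-base-patch16-224" checkpoint and reuses prepare_img():
#
#     image_processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
#     model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
#     inputs = image_processor(images=prepare_img(), return_tensors="pt")
#     with torch.no_grad():
#         predicted_class = model(**inputs).logits.argmax(-1).item()  # 281 == ImageNet "tabby cat"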
import os
from math import log10


def solution(data_file: str = 'base_exp.txt') -> int:
    """
    Project Euler 99: return the 1-indexed line of the data file whose base,exponent
    pair has the greatest value of base**exponent.
    """
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(',')))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result


if __name__ == "__main__":
    print(solution())
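# Why the logarithm comparison above is valid: log10 is strictly increasing and
# log10(a ** x) == x * log10(a), so ranking pairs by x * log10(a) ranks them by
# a ** x while keeping every intermediate value tiny. For instance:
#
#     2 * log10(10) = 2.0  >  3 * log10(4) ~= 1.806,  matching  10**2 = 100 > 4**3 = 64.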
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.speecht5')
def load_weights(checkpoint, hf_model, config):
    # Note: the SpeechTaHifiGan attribute names below (conv_pre, upsampler, resblocks,
    # conv_post) are reconstructed from the model definition; the checkpoint keys are
    # taken verbatim from the original code.
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint['input_conv.weight_g']
    hf_model.conv_pre.weight_v.data = checkpoint['input_conv.weight_v']
    hf_model.conv_pre.bias.data = checkpoint['input_conv.bias']

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint['output_conv.1.weight_g']
    hf_model.conv_post.weight_v.data = checkpoint['output_conv.1.weight_v']
    hf_model.conv_post.bias.data = checkpoint['output_conv.1.bias']

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechTaHifiGanConfig()

    model = SpeechTaHifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint['model']['generator'], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print('Pushing to the hub...')
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
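# Example invocation of this conversion script; the file names are placeholders for a
# fairseq-style HiFi-GAN checkpoint and its accompanying mean/scale statistics:
#
#     python convert_hifigan.py \
#         --checkpoint_path ./hifigan_checkpoint.pt \
#         --stats_path ./stats.npy \
#         --pytorch_dump_folder_path ./speecht5_hifigan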
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'''files''' , [
['''full:README.md''', '''dataset_infos.json'''],
['''empty:README.md''', '''dataset_infos.json'''],
['''dataset_infos.json'''],
['''full:README.md'''],
] , )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp('dset_infos_dir')
    if "full:README.md" in files:
        with open(dataset_infos_dir / 'README.md', 'w') as f:
            f.write('---\ndataset_info:\n  dataset_size: 42\n---')
    if "empty:README.md" in files:
        with open(dataset_infos_dir / 'README.md', 'w') as f:
            f.write('')
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / 'dataset_infos.json', 'w') as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'''dataset_info''' , [
DatasetInfo(),
DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, 'dataset_info.json'))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description='foo', citation='bar', homepage='https://foo.bar', license='CC0', features=Features({'a': Value('int32')}), post_processed={}, supervised_keys=(), task_templates=[], builder_name='builder', config_name='config', version='1.0.0', splits=[{'name': 'train', 'num_examples': 42}], download_checksums={}, download_size=1337, post_processing_size=442, dataset_size=1234, size_in_bytes=1337 + 442 + 1234, )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'''dataset_infos_dict''' , [
DatasetInfosDict(),
DatasetInfosDict({'''default''': DatasetInfo()} ),
DatasetInfosDict({'''my_config_name''': DatasetInfo()} ),
DatasetInfosDict(
{
'''default''': DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'''v1''': DatasetInfo(dataset_size=42 ),
'''v2''': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, 'README.md'))
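# A minimal round-trip sketch of the behaviour exercised above (the directory name is
# illustrative; `write_to_directory` produces the `dataset_info.json` the assertions
# check for):
#
#     info = DatasetInfo(description="foo", dataset_size=42)
#     info.write_to_directory("/tmp/dset_info_demo")
#     assert DatasetInfo.from_directory("/tmp/dset_info_demo") == info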
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    """
    Zero-shot audio classification pipeline: scores an audio input against a set of
    candidate text labels using an audio-text model.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios, **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
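# A minimal usage sketch via the high-level `pipeline` factory. The task string and the
# CLAP checkpoint below are assumptions for illustration, not taken from this file:
#
#     from transformers import pipeline
#
#     classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#     classifier("dog_bark.wav", candidate_labels=["dog barking", "car engine", "rain"])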
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = DatasetDict(
        {
            'train': dataset['train'].select(train_idxs),
            'validation': dataset['train'].select(valid_idxs),
            'test': dataset['validation'],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=['idx', 'sentence1', 'sentence2'],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == 'fp8':
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != 'no':
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding='longest',
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors='pt',
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets['test'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    # New Code #
    test_references = []
    # Download the dataset
    datasets = load_dataset('glue', 'mrpc')
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets['train'].num_rows), datasets['train']['label'])
    test_predictions = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print('Average test metrics from all folds:', test_metric)
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision',
        type=str,
        default=None,
        choices=['no', 'fp16', 'bf16', 'fp8'],
        help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.',
    )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    # New Code #
    parser.add_argument('--num_folds', type=int, default=3, help='The number of splits to perform across the dataset')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
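# Example invocations (the script file name `cross_validation.py` is a placeholder for
# wherever this file is saved). `accelerate launch` picks up whatever hardware setup
# was previously configured via `accelerate config`:
#
#     python cross_validation.py --num_folds 5
#     accelerate launch cross_validation.py --mixed_precision fp16 --num_folds 3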
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Check whether a queen at (row, column) is attacked along its row, column or diagonals."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print('Q', end=' ')
            else:
                print('.', end=' ')
        print()


# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained('microsoft/resnet-50')
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained('microsoft/resnet-101')
    else:
        raise ValueError('Model name should include either resnet50 or resnet101')

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = 'panoptic' in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = 'huggingface/label-files'
        filename = 'coco-detection-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.conv1.weight""", """backbone.conv_encoder.model.embedder.embedder.convolution.weight""") )
rename_keys.append(("""backbone.0.body.bn1.weight""", """backbone.conv_encoder.model.embedder.embedder.normalization.weight""") )
rename_keys.append(("""backbone.0.body.bn1.bias""", """backbone.conv_encoder.model.embedder.embedder.normalization.bias""") )
rename_keys.append(("""backbone.0.body.bn1.running_mean""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_mean""") )
rename_keys.append(("""backbone.0.body.bn1.running_var""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_var""") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""",
f"""encoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias""") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""",
f"""decoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
) )
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
) )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias""") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ''
    if is_panoptic:
        prefix = 'detr.'

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]

    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
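# The 256-row slices above rely on how PyTorch's nn.MultiheadAttention stores its input
# projections: one fused (3*d, d) weight with d = 256 for DETR, laid out as query rows
# first, then key rows, then value rows. A toy illustration of that layout:
#
#     fused = torch.arange(3 * 256 * 4).reshape(3 * 256, 4)
#     q, k, v = fused[:256], fused[256:512], fused[-256:]
#     assert torch.equal(torch.cat([q, k, v]), fused)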
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """
    Copy/paste/tweak the original DETR weights into our DETR structure.
    """
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        'detr-resnet-50': 'detr_resnet50',
        'detr-resnet-101': 'detr_resnet101',
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load('facebookresearch/detr', model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = 'detr.' + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = 'detr.model.' if is_panoptic else 'model.'
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith('detr')
                and not key.startswith('class_labels_classifier')
                and not key.startswith('bbox_predictor')
            ):
                val = state_dict.pop(key)
                state_dict['detr.model' + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict['detr.' + key] = val
            elif key.startswith('bbox_attention') or key.startswith('mask_head'):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith('class_labels_classifier') and not key.startswith('bbox_predictor'):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an image
    format = 'coco_panoptic' if is_panoptic else 'coco_detection'
    processor = DetrImageProcessor(format=format)

    encoding = processor(images=prepare_img(), return_tensors='pt')
    pixel_values = encoding['pixel_values']

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs['pred_logits'], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs['pred_boxes'], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs['pred_masks'], atol=1e-4)
    print('Looks ok!')

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info('Uploading PyTorch model and image processor to the hub...')
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''detr-resnet-50''',
type=str,
choices=['''detr-resnet-50''', '''detr-resnet-101'''],
help='''Name of the DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the model to the hub or not.''')
_lowerCAmelCase = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
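# Example invocation of the script above (hypothetical script file name and paths):
#   python convert_detr_checkpoint.py --model_name detr-resnet-50 \
#       --pytorch_dump_folder_path ./detr-resnet-50 --push_to_hub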
'''simple docstring'''
import os
import string
import sys
a = 1 << 8
a = {
"tab": ord("\t"),
"newline": ord("\r"),
"esc": 27,
"up": 65 + ARROW_KEY_FLAG,
"down": 66 + ARROW_KEY_FLAG,
"right": 67 + ARROW_KEY_FLAG,
"left": 68 + ARROW_KEY_FLAG,
"mod_int": 91,
"undefined": sys.maxsize,
"interrupt": 3,
"insert": 50,
"delete": 51,
"pg_up": 53,
"pg_down": 54,
}
a = KEYMAP["up"]
a = KEYMAP["left"]
if sys.platform == "win32":
a = []
a = {
b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
}
for i in range(10):
a = ord(str(i))
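# Quick check of the arrow-key encoding above (assuming the KEYMAP / ARROW_KEY_FLAG
# names from the original module): arrow codes carry a 1 << 8 offset so they cannot
# collide with single-byte input, and stripping the flag recovers the final byte of
# the ANSI escape sequence (ESC [ A is "cursor up").
assert KEYMAP["up"] - ARROW_KEY_FLAG == ord("A")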
def __magic_name__ ( ) -> Union[str, Any]:
'''simple docstring'''
if os.name == "nt":
import msvcrt
__SCREAMING_SNAKE_CASE : Union[str, Any] = """mbcs"""
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(__UpperCAmelCase ) == 0:
# Read the keystroke
__SCREAMING_SNAKE_CASE : Dict = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
__SCREAMING_SNAKE_CASE : Optional[Any] = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
__SCREAMING_SNAKE_CASE : int = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) )
WIN_CH_BUFFER.append(__UpperCAmelCase )
if ord(__UpperCAmelCase ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = chr(KEYMAP["""esc"""] )
except KeyError:
__SCREAMING_SNAKE_CASE : List[Any] = cha[1]
else:
__SCREAMING_SNAKE_CASE : Dict = ch.decode(__UpperCAmelCase )
else:
__SCREAMING_SNAKE_CASE : Tuple = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
__SCREAMING_SNAKE_CASE : Tuple = sys.stdin.fileno()
__SCREAMING_SNAKE_CASE : int = termios.tcgetattr(__UpperCAmelCase )
try:
tty.setraw(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE : Optional[int] = sys.stdin.read(1 )
finally:
termios.tcsetattr(__UpperCAmelCase , termios.TCSADRAIN , __UpperCAmelCase )
return ch
def __magic_name__ ( ) -> List[str]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Dict = get_raw_chars()
if ord(__UpperCAmelCase ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(__UpperCAmelCase ) == KEYMAP["esc"]:
__SCREAMING_SNAKE_CASE : Tuple = get_raw_chars()
if ord(__UpperCAmelCase ) == KEYMAP["mod_int"]:
__SCREAMING_SNAKE_CASE : int = get_raw_chars()
if ord(__UpperCAmelCase ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(__UpperCAmelCase ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(__UpperCAmelCase ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class __a ( _snake_case ):
__UpperCamelCase : Tuple = 'sew'
def __init__( self : str ,lowerCamelCase : Any=32 ,lowerCamelCase : str=768 ,lowerCamelCase : str=12 ,lowerCamelCase : Union[str, Any]=12 ,lowerCamelCase : Union[str, Any]=3072 ,lowerCamelCase : int=2 ,lowerCamelCase : Union[str, Any]="gelu" ,lowerCamelCase : Tuple=0.1 ,lowerCamelCase : Union[str, Any]=0.1 ,lowerCamelCase : Union[str, Any]=0.1 ,lowerCamelCase : Any=0.0 ,lowerCamelCase : Optional[Any]=0.1 ,lowerCamelCase : Union[str, Any]=0.1 ,lowerCamelCase : Optional[Any]=0.02 ,lowerCamelCase : List[str]=1E-5 ,lowerCamelCase : Tuple="group" ,lowerCamelCase : Optional[Any]="gelu" ,lowerCamelCase : List[str]=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) ,lowerCamelCase : Any=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) ,lowerCamelCase : Dict=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) ,lowerCamelCase : Optional[int]=False ,lowerCamelCase : Dict=128 ,lowerCamelCase : Union[str, Any]=16 ,lowerCamelCase : List[Any]=True ,lowerCamelCase : List[Any]=0.05 ,lowerCamelCase : Optional[int]=10 ,lowerCamelCase : Any=2 ,lowerCamelCase : Any=0.0 ,lowerCamelCase : Tuple=10 ,lowerCamelCase : str=0 ,lowerCamelCase : Tuple="mean" ,lowerCamelCase : int=False ,lowerCamelCase : Dict=False ,lowerCamelCase : Optional[int]=256 ,lowerCamelCase : str=0 ,lowerCamelCase : Tuple=1 ,lowerCamelCase : Tuple=2 ,**lowerCamelCase : Union[str, Any] ,):
'''simple docstring'''
super().__init__(**lowerCamelCase ,pad_token_id=lowerCamelCase ,bos_token_id=lowerCamelCase ,eos_token_id=lowerCamelCase )
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = feat_extract_norm
__SCREAMING_SNAKE_CASE = feat_extract_activation
__SCREAMING_SNAKE_CASE = list(lowerCamelCase )
__SCREAMING_SNAKE_CASE = list(lowerCamelCase )
__SCREAMING_SNAKE_CASE = list(lowerCamelCase )
__SCREAMING_SNAKE_CASE = conv_bias
__SCREAMING_SNAKE_CASE = num_conv_pos_embeddings
__SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups
__SCREAMING_SNAKE_CASE = len(self.conv_dim )
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = squeeze_factor
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = hidden_dropout
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = activation_dropout
__SCREAMING_SNAKE_CASE = feat_proj_dropout
__SCREAMING_SNAKE_CASE = final_dropout
__SCREAMING_SNAKE_CASE = layerdrop
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. """
                """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, """
                f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
                f""" = {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__SCREAMING_SNAKE_CASE = apply_spec_augment
__SCREAMING_SNAKE_CASE = mask_time_prob
__SCREAMING_SNAKE_CASE = mask_time_length
__SCREAMING_SNAKE_CASE = mask_time_min_masks
__SCREAMING_SNAKE_CASE = mask_feature_prob
__SCREAMING_SNAKE_CASE = mask_feature_length
__SCREAMING_SNAKE_CASE = mask_feature_min_masks
# ctc loss
__SCREAMING_SNAKE_CASE = ctc_loss_reduction
__SCREAMING_SNAKE_CASE = ctc_zero_infinity
# sequence classification
__SCREAMING_SNAKE_CASE = use_weighted_layer_sum
__SCREAMING_SNAKE_CASE = classifier_proj_size
@property
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
return functools.reduce(operator.mul ,self.conv_stride ,1 )
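# Worked example for the property above (assuming the default conv_stride): the
# product of all convolutional strides is the number of raw audio samples that
# collapse into a single encoder frame.
import functools
import operator

default_strides = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
assert functools.reduce(operator.mul, default_strides, 1) == 320  # 20 ms at 16 kHz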
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
UpperCamelCase =logging.get_logger(__name__)
class A ( _a ):
"""simple docstring"""
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
warnings.warn(
"""The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use BeitImageProcessor instead.""" , __SCREAMING_SNAKE_CASE , )
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self : Optional[int] ):
UpperCAmelCase = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
UpperCAmelCase = tf.convert_to_tensor(
            [[5, 1_2_1, 1_1, 6_6_0, 1_6, 7_3_0, 2_5_5_4_3, 1_1_0, 8_3, 6]] ,dtype=tf.intaa ,)  # decodes to the French sentence "J'aime le camembert !"
UpperCAmelCase = model(__SCREAMING_SNAKE_CASE )["last_hidden_state"]
UpperCAmelCase = tf.TensorShape((1, 1_0, 7_6_8) )
self.assertEqual(output.shape ,__SCREAMING_SNAKE_CASE )
# compare the actual values for a slice.
UpperCAmelCase = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] ,dtype=tf.floataa ,)
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__lowerCamelCase = {
'configuration_gpt_neo': ['GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoConfig', 'GPTNeoOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoForCausalLM',
'GPTNeoForQuestionAnswering',
'GPTNeoForSequenceClassification',
'GPTNeoForTokenClassification',
'GPTNeoModel',
'GPTNeoPreTrainedModel',
'load_tf_weights_in_gpt_neo',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'FlaxGPTNeoForCausalLM',
'FlaxGPTNeoModel',
'FlaxGPTNeoPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
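# A generic, self-contained sketch (not the transformers implementation) of the lazy
# import pattern used above: attribute access triggers the real import, so merely
# importing the package stays cheap until heavy torch/flax symbols are actually used.
import importlib
import types

class LazyDemoModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        return getattr(importlib.import_module(module_name), attr)  # imported only now

lazy = LazyDemoModule("demo", {"json": ["dumps"]})
assert lazy.dumps({"a": 1}) == '{"a": 1}'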
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> bool:
return str(UpperCAmelCase__ ) == str(UpperCAmelCase__ )[::-1]
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
return int(UpperCAmelCase__ ) + int(str(UpperCAmelCase__ )[::-1] )
def UpperCAmelCase__ ( UpperCAmelCase__ = 1_00_00 ) -> int:
A_ = []
for num in range(1, UpperCAmelCase__ ):
A_ = 0
A_ = num
while iterations < 50:
A_ = sum_reverse(UpperCAmelCase__ )
iterations += 1
if is_palindrome(UpperCAmelCase__ ):
break
else:
lychrel_nums.append(UpperCAmelCase__ )
return len(UpperCAmelCase__ )
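# Worked example of the reverse-and-add loop above (self-contained restatement with
# illustrative names): 349 reaches a palindrome in three iterations, so it is not a
# Lychrel candidate.
def _sum_reverse(n: int) -> int:
    return n + int(str(n)[::-1])

n, steps = 349, 0
while str(n) != str(n)[::-1]:
    n, steps = _sum_reverse(n), steps + 1
assert (n, steps) == (7337, 3)  # 349 -> 1292 -> 4213 -> 7337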
if __name__ == "__main__":
print(f"""{solution() = }""")
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
__SCREAMING_SNAKE_CASE : List[str] =[
'kernels/rwkv/wkv_cuda.cu',
'kernels/rwkv/wkv_op.cpp',
'kernels/deformable_detr/ms_deform_attn.h',
'kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh',
'models/graphormer/algos_graphormer.pyx',
]
def _SCREAMING_SNAKE_CASE ( lowerCamelCase__ : Union[str, Any] ):
'''simple docstring'''
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[Any] =argparse.ArgumentParser()
parser.add_argument('--check_lib', action='store_true', help='Whether to check the build or the actual package.')
__SCREAMING_SNAKE_CASE : Union[str, Any] =parser.parse_args()
if args.check_lib:
__SCREAMING_SNAKE_CASE : List[str] =importlib.import_module('transformers')
__SCREAMING_SNAKE_CASE : Any =Path(transformers_module.__file__).parent
else:
__SCREAMING_SNAKE_CASE : List[Any] =Path.cwd() / 'build/lib/transformers'
if not test_custom_files_are_present(transformers_path):
raise ValueError('The built release does not contain the custom files. Fix this before going further!')
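# The same locate-the-installed-package trick used above, demonstrated on the stdlib
# (runnable anywhere): import the package, then resolve files relative to its path.
import importlib
from pathlib import Path

json_pkg = importlib.import_module("json")
assert (Path(json_pkg.__file__).parent / "decoder.py").exists()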
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( lowerCamelCase__ : int ):
'''simple docstring'''
if p < 2:
raise ValueError("""p should not be less than 2!""" )
elif p == 2:
return True
A: Optional[int] = 4
A: Tuple = (1 << p) - 1
for _ in range(p - 2 ):
A: List[Any] = ((s * s) - 2) % m
return s == 0
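# Worked trace for p = 7, i.e. M_p = 2**7 - 1 = 127: starting from s = 4, iterating
# s <- (s*s - 2) % 127 for p - 2 = 5 steps gives 14, 67, 42, 111, 0, and the final
# zero certifies that 127 is prime.
s, m, trace = 4, (1 << 7) - 1, []
for _ in range(7 - 2):
    s = (s * s - 2) % m
    trace.append(s)
assert trace == [14, 67, 42, 111, 0]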
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
_lowerCamelCase = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""feature request""",
"""new model""",
"""wip""",
]
def _lowerCAmelCase ( ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase :List[Any] =Github(os.environ["""GITHUB_TOKEN"""] )
_UpperCamelCase :Union[str, Any] =g.get_repo("""huggingface/transformers""" )
_UpperCamelCase :List[Any] =repo.get_issues(state="""open""" )
for issue in open_issues:
        _UpperCamelCase :Optional[Any] =sorted([comment for comment in issue.get_comments()] , key=lambda __a : __a.created_at , reverse=_lowerCamelCase )
_UpperCamelCase :Tuple =comments[0] if len(_lowerCamelCase ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
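# The staleness conditions above are plain timedelta arithmetic; a minimal check:
from datetime import datetime, timedelta

updated_at = datetime.utcnow() - timedelta(days=10)
assert (datetime.utcnow() - updated_at).days > 7  # would pass the inactivity test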
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Any = logging.get_logger(__name__)
_lowerCamelCase : str = {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""",
}
class lowerCamelCase__ ( __snake_case ):
__UpperCAmelCase = """mvp"""
__UpperCAmelCase = ["""past_key_values"""]
__UpperCAmelCase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , lowerCAmelCase__=50_267 , lowerCAmelCase__=1_024 , lowerCAmelCase__=12 , lowerCAmelCase__=4_096 , lowerCAmelCase__=16 , lowerCAmelCase__=12 , lowerCAmelCase__=4_096 , lowerCAmelCase__=16 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__="gelu" , lowerCAmelCase__=1_024 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.02 , lowerCAmelCase__=0.0 , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=1 , lowerCAmelCase__=0 , lowerCAmelCase__=2 , lowerCAmelCase__=True , lowerCAmelCase__=2 , lowerCAmelCase__=2 , lowerCAmelCase__=False , lowerCAmelCase__=100 , lowerCAmelCase__=800 , **lowerCAmelCase__ , ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase :Dict =vocab_size
_UpperCamelCase :List[Any] =max_position_embeddings
_UpperCamelCase :Tuple =d_model
_UpperCamelCase :List[Any] =encoder_ffn_dim
_UpperCamelCase :Optional[int] =encoder_layers
_UpperCamelCase :List[str] =encoder_attention_heads
_UpperCamelCase :List[Any] =decoder_ffn_dim
_UpperCamelCase :Union[str, Any] =decoder_layers
_UpperCamelCase :int =decoder_attention_heads
_UpperCamelCase :Union[str, Any] =dropout
_UpperCamelCase :Tuple =attention_dropout
_UpperCamelCase :Union[str, Any] =activation_dropout
_UpperCamelCase :Optional[Any] =activation_function
_UpperCamelCase :Dict =init_std
_UpperCamelCase :Optional[Any] =encoder_layerdrop
_UpperCamelCase :List[Any] =decoder_layerdrop
_UpperCamelCase :Optional[int] =classifier_dropout
_UpperCamelCase :Optional[Any] =use_cache
_UpperCamelCase :List[Any] =encoder_layers
_UpperCamelCase :List[str] =scale_embedding # scale factor will be sqrt(d_model) if True
_UpperCamelCase :Dict =use_prompt
_UpperCamelCase :Optional[Any] =prompt_length
_UpperCamelCase :Tuple =prompt_mid_dim
super().__init__(
pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , decoder_start_token_id=lowerCAmelCase__ , forced_eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ , )
if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , lowerCAmelCase__ ):
_UpperCamelCase :Dict =self.bos_token_id
warnings.warn(
f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
"""The config can simply be saved and uploaded again to be fixed.""" )
from __future__ import annotations
def _snake_case ( __snake_case ):
    return [ord(elem) - 96 for elem in plain]
def _snake_case ( __snake_case ):
return "".join(chr(elem + 96 ) for elem in encoded )
def _snake_case ( ):
_UpperCamelCase = encode(input('''-> ''' ).strip().lower() )
print('''Encoded: ''' , __snake_case )
print('''Decoded:''' , decode(__snake_case ) )
if __name__ == "__main__":
main()
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class a__ ( a_, a_, unittest.TestCase ):
__lowerCAmelCase = IFPipeline
__lowerCAmelCase = TEXT_TO_IMAGE_PARAMS - {"""width""", """height""", """latents"""}
__lowerCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
__lowerCAmelCase = PipelineTesterMixin.required_optional_params - {"""latents"""}
def __magic_name__ ( self ):
return self._get_dummy_components()
def __magic_name__ ( self , _a , _a=0 ):
if str(_a ).startswith("mps" ):
lowercase : List[str] = torch.manual_seed(_a )
else:
lowercase : Dict = torch.Generator(device=_a ).manual_seed(_a )
lowercase : str = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __magic_name__ ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def __magic_name__ ( self ):
        # Due to non-determinism in the save/load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __magic_name__ ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __magic_name__ ( self ):
self._test_save_load_local()
def __magic_name__ ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def __magic_name__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
def __magic_name__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self ):
lowercase : Tuple = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa )
lowercase : List[str] = IFSuperResolutionPipeline.from_pretrained(
"DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=_a , tokenizer=_a )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("cuda" )
        lowercase , lowercase = pipe_a.encode_prompt("anime turtle" , device="cuda" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
lowercase : List[str] = None
lowercase : Union[str, Any] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(_a , _a , _a , _a )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
lowercase : Optional[int] = IFImgaImgPipeline(**pipe_a.components )
lowercase : Dict = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(_a , _a , _a , _a )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
lowercase : List[str] = IFInpaintingPipeline(**pipe_a.components )
lowercase : Optional[Any] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(_a , _a , _a , _a )
def __magic_name__ ( self , _a , _a , _a , _a ):
# pipeline 1
_start_torch_memory_measurement()
lowercase : Optional[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase : Optional[Any] = pipe_a(
prompt_embeds=_a , negative_prompt_embeds=_a , num_inference_steps=2 , generator=_a , output_type="np" , )
lowercase : Union[str, Any] = output.images[0]
assert image.shape == (64, 64, 3)
lowercase : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
lowercase : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" )
assert_mean_pixel_difference(_a , _a )
# pipeline 2
_start_torch_memory_measurement()
lowercase : List[str] = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_a )
lowercase : Optional[int] = pipe_a(
prompt_embeds=_a , negative_prompt_embeds=_a , image=_a , generator=_a , num_inference_steps=2 , output_type="np" , )
lowercase : Union[str, Any] = output.images[0]
assert image.shape == (256, 256, 3)
lowercase : Union[str, Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowercase : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" )
assert_mean_pixel_difference(_a , _a )
def __magic_name__ ( self , _a , _a , _a , _a ):
# pipeline 1
_start_torch_memory_measurement()
lowercase : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_a )
lowercase : Union[str, Any] = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase : Union[str, Any] = pipe_a(
prompt_embeds=_a , negative_prompt_embeds=_a , image=_a , num_inference_steps=2 , generator=_a , output_type="np" , )
lowercase : Optional[Any] = output.images[0]
assert image.shape == (64, 64, 3)
lowercase : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
lowercase : Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" )
assert_mean_pixel_difference(_a , _a )
# pipeline 2
_start_torch_memory_measurement()
lowercase : Dict = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase : Dict = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(_a )
lowercase : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_a )
lowercase : Dict = pipe_a(
prompt_embeds=_a , negative_prompt_embeds=_a , image=_a , original_image=_a , generator=_a , num_inference_steps=2 , output_type="np" , )
lowercase : int = output.images[0]
assert image.shape == (256, 256, 3)
lowercase : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowercase : int = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" )
assert_mean_pixel_difference(_a , _a )
def __magic_name__ ( self , _a , _a , _a , _a ):
# pipeline 1
_start_torch_memory_measurement()
lowercase : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_a )
lowercase : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(_a )
lowercase : Union[str, Any] = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase : Dict = pipe_a(
prompt_embeds=_a , negative_prompt_embeds=_a , image=_a , mask_image=_a , num_inference_steps=2 , generator=_a , output_type="np" , )
lowercase : Dict = output.images[0]
assert image.shape == (64, 64, 3)
lowercase : int = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
lowercase : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" )
assert_mean_pixel_difference(_a , _a )
# pipeline 2
_start_torch_memory_measurement()
lowercase : Union[str, Any] = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_a )
lowercase : Union[str, Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(_a )
lowercase : str = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(_a )
lowercase : Dict = pipe_a(
prompt_embeds=_a , negative_prompt_embeds=_a , image=_a , mask_image=_a , original_image=_a , generator=_a , num_inference_steps=2 , output_type="np" , )
lowercase : Union[str, Any] = output.images[0]
assert image.shape == (256, 256, 3)
lowercase : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowercase : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" )
assert_mean_pixel_difference(_a , _a )
def __magic_name__ ( ) -> Tuple:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
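# Hypothetical usage of the reset helper above together with the assertions in the
# tests (requires a CUDA device, hence left as a commented sketch):
#   _start_torch_memory_measurement()
#   ...run a pipeline...
#   peak_bytes = torch.cuda.max_memory_allocated()  # peak since the last reset
#   assert peak_bytes < 13 * 10**9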
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCAmelCase : Tuple = {
"CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
"https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class _UpperCamelCase ( lowerCAmelCase ):
UpperCAmelCase_ = """trajectory_transformer"""
UpperCAmelCase_ = ["""past_key_values"""]
UpperCAmelCase_ = {
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self :Union[str, Any] , lowerCamelCase :Optional[int]=100 , lowerCamelCase :Optional[int]=5 , lowerCamelCase :Optional[Any]=1 , lowerCamelCase :Any=1 , lowerCamelCase :int=249 , lowerCamelCase :Optional[Any]=6 , lowerCamelCase :Optional[Any]=17 , lowerCamelCase :Optional[int]=25 , lowerCamelCase :Union[str, Any]=4 , lowerCamelCase :Union[str, Any]=4 , lowerCamelCase :Optional[int]=128 , lowerCamelCase :List[Any]=0.1 , lowerCamelCase :List[str]=0.1 , lowerCamelCase :Tuple=0.1 , lowerCamelCase :Dict=0.00_06 , lowerCamelCase :Dict=512 , lowerCamelCase :Optional[Any]=0.02 , lowerCamelCase :Any=1e-12 , lowerCamelCase :Optional[Any]=1 , lowerCamelCase :List[Any]=True , lowerCamelCase :int=1 , lowerCamelCase :Dict=5_0256 , lowerCamelCase :Union[str, Any]=5_0256 , **lowerCamelCase :int , ) -> Optional[Any]:
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = action_weight
UpperCAmelCase__ = reward_weight
UpperCAmelCase__ = value_weight
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = block_size
UpperCAmelCase__ = action_dim
UpperCAmelCase__ = observation_dim
UpperCAmelCase__ = transition_dim
UpperCAmelCase__ = learning_rate
UpperCAmelCase__ = n_layer
UpperCAmelCase__ = n_head
UpperCAmelCase__ = n_embd
UpperCAmelCase__ = embd_pdrop
UpperCAmelCase__ = attn_pdrop
UpperCAmelCase__ = resid_pdrop
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = kaiming_initializer_range
UpperCAmelCase__ = use_cache
super().__init__(pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowerCAmelCase : Optional[int] = {
"configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
"configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[int] = ["MaskFormerFeatureExtractor"]
_lowerCAmelCase : Dict = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : int = [
"MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"MaskFormerForInstanceSegmentation",
"MaskFormerModel",
"MaskFormerPreTrainedModel",
]
_lowerCAmelCase : List[str] = [
"MaskFormerSwinBackbone",
"MaskFormerSwinModel",
"MaskFormerSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure)
'''simple docstring'''
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class lowerCAmelCase_ ( __magic_name__ ):
__lowerCamelCase : Optional[Any] = ["input_values", "attention_mask"]
def __init__( self , _lowerCAmelCase = 1 , _lowerCAmelCase = 16000 , _lowerCAmelCase = 0.0 , _lowerCAmelCase = False , _lowerCAmelCase = 80 , _lowerCAmelCase = 16 , _lowerCAmelCase = 64 , _lowerCAmelCase = "hann_window" , _lowerCAmelCase = 1.0 , _lowerCAmelCase = 80 , _lowerCAmelCase = 7600 , _lowerCAmelCase = 1E-10 , _lowerCAmelCase = 2 , _lowerCAmelCase = True , **_lowerCAmelCase , ) -> Optional[int]:
super().__init__(feature_size=_lowerCAmelCase , sampling_rate=_lowerCAmelCase , padding_value=_lowerCAmelCase , **_lowerCAmelCase )
_lowerCAmelCase = do_normalize
_lowerCAmelCase = return_attention_mask
_lowerCAmelCase = num_mel_bins
_lowerCAmelCase = hop_length
_lowerCAmelCase = win_length
_lowerCAmelCase = win_function
_lowerCAmelCase = frame_signal_scale
_lowerCAmelCase = fmin
_lowerCAmelCase = fmax
_lowerCAmelCase = mel_floor
_lowerCAmelCase = reduction_factor
_lowerCAmelCase = win_length * sampling_rate // 1000
_lowerCAmelCase = hop_length * sampling_rate // 1000
_lowerCAmelCase = optimal_fft_length(self.sample_size )
_lowerCAmelCase = (self.n_fft // 2) + 1
_lowerCAmelCase = window_function(window_length=self.sample_size , name=self.win_function , periodic=_lowerCAmelCase )
_lowerCAmelCase = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm="slaney" , mel_scale="slaney" , )
if frame_signal_scale != 1.0:
warnings.warn(
"The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers" , _lowerCAmelCase , )
if reduction_factor != 2.0:
warnings.warn(
"The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers" , _lowerCAmelCase , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def _snake_case ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 0.0 ) -> List[np.ndarray]:
if attention_mask is not None:
_lowerCAmelCase = np.array(_lowerCAmelCase , np.intaa )
_lowerCAmelCase = []
for vector, length in zip(_lowerCAmelCase , attention_mask.sum(-1 ) ):
_lowerCAmelCase = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
_lowerCAmelCase = padding_value
normed_input_values.append(_lowerCAmelCase )
else:
_lowerCAmelCase = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def _snake_case ( self , _lowerCAmelCase , ) -> np.ndarray:
_lowerCAmelCase = spectrogram(
_lowerCAmelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel="log10" , )
return log_mel_spec.T
def __call__( self , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> BatchFeature:
if audio is None and audio_target is None:
raise ValueError("You must provide either `audio` or `audio_target` values." )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
if audio is not None:
_lowerCAmelCase = self._process_audio(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase , )
else:
_lowerCAmelCase = None
if audio_target is not None:
_lowerCAmelCase = self._process_audio(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase , )
if inputs is None:
return inputs_target
else:
_lowerCAmelCase = inputs_target["input_values"]
_lowerCAmelCase = inputs_target.get("attention_mask" )
if decoder_attention_mask is not None:
_lowerCAmelCase = decoder_attention_mask
return inputs
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> BatchFeature:
_lowerCAmelCase = isinstance(_lowerCAmelCase , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
_lowerCAmelCase = is_batched_numpy or (
isinstance(_lowerCAmelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_lowerCAmelCase = [np.asarray(_lowerCAmelCase , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(_lowerCAmelCase , np.ndarray ):
_lowerCAmelCase = np.asarray(_lowerCAmelCase , dtype=np.floataa )
elif isinstance(_lowerCAmelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
_lowerCAmelCase = speech.astype(np.floataa )
# always return batch
if not is_batched:
_lowerCAmelCase = [speech]
# needed to make pad() work on spectrogram inputs
_lowerCAmelCase = self.feature_size
# convert into correct format for padding
if is_target:
_lowerCAmelCase = [self._extract_mel_features(_lowerCAmelCase ) for waveform in speech]
_lowerCAmelCase = BatchFeature({"input_values": features} )
_lowerCAmelCase = self.num_mel_bins
else:
_lowerCAmelCase = BatchFeature({"input_values": speech} )
_lowerCAmelCase = self.pad(
_lowerCAmelCase , padding=_lowerCAmelCase , max_length=_lowerCAmelCase , truncation=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , **_lowerCAmelCase , )
_lowerCAmelCase = feature_size_hack
# convert input values to correct format
_lowerCAmelCase = padded_inputs["input_values"]
if not isinstance(input_values[0] , np.ndarray ):
_lowerCAmelCase = [np.asarray(_lowerCAmelCase , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(_lowerCAmelCase , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
_lowerCAmelCase = [array.astype(np.floataa ) for array in input_values]
elif isinstance(_lowerCAmelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
_lowerCAmelCase = input_values.astype(np.floataa )
# convert attention_mask to correct format
_lowerCAmelCase = padded_inputs.get("attention_mask" )
if attention_mask is not None:
_lowerCAmelCase = [np.asarray(_lowerCAmelCase , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
_lowerCAmelCase = (
attention_mask
if self._get_padding_strategies(_lowerCAmelCase , max_length=_lowerCAmelCase ) is not PaddingStrategy.DO_NOT_PAD
else None
)
_lowerCAmelCase = self.zero_mean_unit_var_norm(
padded_inputs["input_values"] , attention_mask=_lowerCAmelCase , padding_value=self.padding_value )
if return_tensors is not None:
_lowerCAmelCase = padded_inputs.convert_to_tensors(_lowerCAmelCase )
return padded_inputs
def _snake_case ( self ) -> Dict[str, Any]:
_lowerCAmelCase = super().to_dict()
# Don't serialize these as they are derived from the other properties.
_lowerCAmelCase = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
for name in names:
if name in output:
del output[name]
return output
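# Quick numeric check of the frame bookkeeping in __init__ above (defaults assumed:
# sampling_rate=16000, win_length=64 ms, hop_length=16 ms): window and stride are
# converted from milliseconds to samples before the FFT size is chosen.
sample_size = 64 * 16000 // 1000    # 1024 samples per analysis window
sample_stride = 16 * 16000 // 1000  # 256 samples between successive frames
assert (sample_size, sample_stride) == (1024, 256)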
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
lowercase_ = datasets.load_iris()
lowercase_ = np.array(data['''data'''])
lowercase_ = np.array(data['''target'''])
lowercase_ = data['''target_names''']
lowercase_ , lowercase_ , lowercase_ , lowercase_ = train_test_split(X, y)
def __lowerCAmelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] ) -> Optional[int]:
return np.linalg.norm(np.array(__lowerCamelCase ) - np.array(__lowerCamelCase ) )
def __lowerCAmelCase ( __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int=5 ) -> str:
__lowerCAmelCase =zip(__lowerCamelCase , __lowerCamelCase )
# List of distances of all points from the point to be classified
__lowerCAmelCase =[]
for data_point in data:
__lowerCAmelCase =euclidean_distance(data_point[0] , __lowerCamelCase )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
__lowerCAmelCase =[i[1] for i in sorted(__lowerCamelCase )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
__lowerCAmelCase =Counter(__lowerCamelCase ).most_common(1 )[0][0]
return classes[result]
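# Sanity check for the euclidean distance helper above (3-4-5 triangle):
import numpy as np

assert np.linalg.norm(np.array([0, 0]) - np.array([3, 4])) == 5.0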
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
|
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class lowercase :
"""simple docstring"""
a__ = BlenderbotSmallConfig
a__ = {}
a__ = "gelu"
def __init__( self , __snake_case , __snake_case=13 , __snake_case=7 , __snake_case=True , __snake_case=False , __snake_case=99 , __snake_case=32 , __snake_case=2 , __snake_case=4 , __snake_case=37 , __snake_case=0.1 , __snake_case=0.1 , __snake_case=20 , __snake_case=2 , __snake_case=1 , __snake_case=0 , ):
_UpperCamelCase : List[str] = parent
_UpperCamelCase : List[Any] = batch_size
_UpperCamelCase : Any = seq_length
_UpperCamelCase : Tuple = is_training
_UpperCamelCase : List[str] = use_labels
_UpperCamelCase : Dict = vocab_size
_UpperCamelCase : Optional[Any] = hidden_size
_UpperCamelCase : Any = num_hidden_layers
_UpperCamelCase : Optional[Any] = num_attention_heads
_UpperCamelCase : Union[str, Any] = intermediate_size
_UpperCamelCase : List[str] = hidden_dropout_prob
_UpperCamelCase : Union[str, Any] = attention_probs_dropout_prob
_UpperCamelCase : int = max_position_embeddings
_UpperCamelCase : Optional[int] = eos_token_id
_UpperCamelCase : List[Any] = pad_token_id
_UpperCamelCase : Optional[Any] = bos_token_id
def A__ ( self):
_UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size)
_UpperCamelCase : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1)
_UpperCamelCase : List[Any] = tf.concat([input_ids, eos_tensor] , axis=1)
_UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase : int = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_UpperCamelCase : Any = prepare_blenderbot_small_inputs_dict(__snake_case , __snake_case , __snake_case)
return config, inputs_dict
def A__ ( self , __snake_case , __snake_case):
_UpperCamelCase : Tuple = TFBlenderbotSmallModel(config=__snake_case).get_decoder()
_UpperCamelCase : Dict = inputs_dict['input_ids']
_UpperCamelCase : Optional[int] = input_ids[:1, :]
_UpperCamelCase : List[Any] = inputs_dict['attention_mask'][:1, :]
_UpperCamelCase : Union[str, Any] = inputs_dict['head_mask']
_UpperCamelCase : Tuple = 1
# first forward pass
_UpperCamelCase : Tuple = model(__snake_case , attention_mask=__snake_case , head_mask=__snake_case , use_cache=__snake_case)
        _UpperCamelCase , _UpperCamelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_UpperCamelCase : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size)
_UpperCamelCase : Tuple = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.inta)
        # append the new tokens to input_ids and extend the attention mask accordingly
_UpperCamelCase : Any = tf.concat([input_ids, next_tokens] , axis=-1)
_UpperCamelCase : List[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1)
_UpperCamelCase : Union[str, Any] = model(__snake_case , attention_mask=__snake_case)[0]
_UpperCamelCase : Union[str, Any] = model(__snake_case , attention_mask=__snake_case , past_key_values=__snake_case)[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1])
# select random slice
_UpperCamelCase : Optional[Any] = int(ids_tensor((1,) , output_from_past.shape[-1]))
_UpperCamelCase : int = output_from_no_past[:, -3:, random_slice_idx]
_UpperCamelCase : Optional[int] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__snake_case , __snake_case , rtol=1e-3)
def lowerCamelCase_ ( UpperCAmelCase_ : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Dict=None , ) -> Any:
'''simple docstring'''
if attention_mask is None:
_UpperCamelCase : Optional[int] = tf.cast(tf.math.not_equal(UpperCAmelCase_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_UpperCamelCase : List[Any] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_UpperCamelCase : Any = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_UpperCamelCase : str = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_UpperCamelCase : Optional[int] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
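# NumPy analogue of the tf.math.not_equal padding mask built above: positions equal
# to pad_token_id get 0, everything else 1.
import numpy as np

ids = np.array([[5, 6, 0, 0]])
mask = (ids != 0).astype(np.int8)
assert mask.tolist() == [[1, 1, 0, 0]]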
@require_tf
class lowercase ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
a__ = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
a__ = (
{
"conversational": TFBlenderbotSmallForConditionalGeneration,
"feature-extraction": TFBlenderbotSmallModel,
"summarization": TFBlenderbotSmallForConditionalGeneration,
"text2text-generation": TFBlenderbotSmallForConditionalGeneration,
"translation": TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
a__ = True
a__ = False
a__ = False
def A__ ( self):
_UpperCamelCase : Tuple = TFBlenderbotSmallModelTester(self)
_UpperCamelCase : Any = ConfigTester(self , config_class=__snake_case)
def A__ ( self):
self.config_tester.run_common_tests()
def A__ ( self):
_UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__snake_case)
@require_tokenizers
@require_tf
class lowercase ( unittest.TestCase ):
"""simple docstring"""
a__ = [
"Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
" i'm going to throw up.\nand why is that?"
]
a__ = "facebook/blenderbot_small-90M"
@cached_property
def A__ ( self):
# use "old" tokenizer here because of bug when downloading new tokenizer
return BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M')
@cached_property
def A__ ( self):
_UpperCamelCase : str = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
return model
@slow
def A__ ( self):
_UpperCamelCase : Optional[int] = self.tokenizer(self.src_text , return_tensors='tf')
_UpperCamelCase : List[str] = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__snake_case , )
_UpperCamelCase : Dict = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__snake_case)[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
__lowerCAmelCase : int = yaml.safe_load(
"\\nname: \"\"\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: \"Dataset Card for X\" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: \"Table of Contents\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Dataset Description\"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: \"Dataset Summary\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Supported Tasks and Leaderboards\"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n"
)
__lowerCAmelCase : str = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
README_CORRECT = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
README_CORRECT_FOUR_LEVEL = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
CORRECT_DICT_FOUR_LEVEL = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Extra Ignored Subsection",
"text": "",
"is_empty_text": True,
"subsections": [],
}
],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
README_EMPTY_YAML = "\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_EMPTY_YAML = (
    "The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)
README_NO_YAML = "\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_NO_YAML = (
    "The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)
README_INCORRECT_YAML = "\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_INCORRECT_YAML = "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."
README_MISSING_TEXT = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_MISSING_TEXT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."
README_NONE_SUBSECTION = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n"
EXPECTED_ERROR_README_NONE_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."
README_MISSING_SUBSECTION = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_MISSING_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."
README_MISSING_CONTENT = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n"
EXPECTED_ERROR_README_MISSING_CONTENT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."
README_MISSING_FIRST_LEVEL = "\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."
README_MULTIPLE_WRONG_FIRST_LEVEL = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n"
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."
README_WRONG_FIRST_LEVEL = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."
README_EMPTY = ""
EXPECTED_ERROR_README_EMPTY = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."
README_MULTIPLE_SAME_HEADING_1 = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."
@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_readme_error(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
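# Illustrative usage outside pytest (hedged; assumes `datasets` is installed):
#
#   readme = ReadMe.from_string(README_CORRECT, example_yaml_structure)
#   readme.validate()  # raises ValueError listing every problem it finds
#
# `ReadMe.from_readme` works the same way but reads the card from disk.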
| 262
|
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"text-classification",
"language-modeling",
"summarization",
"token-classification",
"question-answering",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    """Return the path passed via pytest's -f flag."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir, split="eval"):
    """Load the `{split}_results.json` metrics file written by an example script."""
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
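# For illustration (assumed file shape, not taken from the scripts themselves):
# each Flax example script dumps flat JSON metrics such as
#   {"eval_accuracy": 0.75, "eval_perplexity": 42.0}
# into `<output_dir>/eval_results.json`, which `get_results` simply reloads.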
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)
    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)
    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)
    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)
    @slow
    def test_run_ner(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)
    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
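# Reminder (transformers testing convention, stated here as an assumption about
# this checkout): the @slow tests above only run when RUN_SLOW is set, e.g.
#   RUN_SLOW=1 pytest examples/flax/test_flax_examples.py -k test_run_glue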
| 262
| 1
|
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64], depths=[1, 1, 2, 1], is_training=True, use_labels=True,
        hidden_act="relu", num_labels=3, scope=None, out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4], num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, out_features=self.out_features, out_indices=self.out_indices, num_groups=self.num_groups,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
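# Hedged usage sketch outside the test harness; the checkpoint id is the first
# entry of BIT_PRETRAINED_MODEL_ARCHIVE_LIST, typically "google/bit-50":
#
#   processor = BitImageProcessor.from_pretrained("google/bit-50")
#   model = BitForImageClassification.from_pretrained("google/bit-50")
#   logits = model(**processor(images=prepare_img(), return_tensors="pt")).logits
#   label = model.config.id2label[logits.argmax(-1).item()]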
| 717
|
def stooge_sort(arr: list) -> list:
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)
        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)
        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, h - t)
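# Complexity note: stooge sort recurses three times on 2/3 of the range, so it
# runs in O(n^(log 3 / log 1.5)), roughly O(n^2.71), which is slower than even
# bubble sort; it is purely a teaching example. Quick sanity check:
#   >>> stooge_sort([2, 4, 5, 3, 1])
#   [1, 2, 3, 4, 5]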
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
| 45
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
snake_case : str = {
'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    snake_case['feature_extraction_mobilevit'] = ['MobileViTFeatureExtractor']
    snake_case['image_processing_mobilevit'] = ['MobileViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    snake_case['modeling_mobilevit'] = [
'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileViTForImageClassification',
'MobileViTForSemanticSegmentation',
'MobileViTModel',
'MobileViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    snake_case['modeling_tf_mobilevit'] = [
'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileViTForImageClassification',
'TFMobileViTForSemanticSegmentation',
'TFMobileViTModel',
'TFMobileViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], snake_case, module_spec=__spec__)
| 605
|
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir('fixtures/test_sentencepiece.model')
mock_tokenizer_config = {'target_lang': 'fi', 'source_lang': 'en'}
zh_code = '>>zh<<'
ORG_NAME = 'Helsinki-NLP/'
if is_torch_available():
    FRAMEWORK = 'pt'
elif is_tf_available():
    FRAMEWORK = 'tf'
else:
    FRAMEWORK = 'jax'
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])
        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])
        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()
        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase__ : Optional[int] = {"""input_ids""": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase__, model_name="Helsinki-NLP/opus-mt-en-de", revision="1a8c2263da11e68e50938f97e10cd57820bd504c", decode_kwargs={"use_source_tokenizer": True},
        )
    def test_tokenizer_integration_separate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")
        source_text = "Tämä on testi"
        target_text = "This is a test"
        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]
        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(expected_src_ids, src_ids)
        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(expected_target_ids, target_ids)
        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
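# Hedged usage sketch (real Helsinki-NLP checkpoint; output ids not asserted):
#
#   tok = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#   batch = tok(["I am a small frog"], return_tensors="pt")
#   # target-side text is tokenized with the separate target vocabulary:
#   labels = tok(text_target=["Ich bin ein kleiner Frosch"], return_tensors="pt")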
| 65
| 0
|
fast27_timesteps = [
9_99,
8_00,
7_99,
6_00,
5_99,
5_00,
4_00,
3_99,
3_77,
3_55,
3_33,
3_11,
2_88,
2_66,
2_44,
2_22,
2_00,
1_99,
1_77,
1_55,
1_33,
1_11,
88,
66,
44,
22,
0,
]
smart27_timesteps = [
9_99,
9_76,
9_52,
9_28,
9_05,
8_82,
8_58,
8_57,
8_10,
7_62,
7_15,
7_14,
5_72,
4_29,
4_28,
2_86,
2_85,
2_38,
1_90,
1_43,
1_42,
1_18,
95,
71,
47,
24,
0,
]
smart50_timesteps = [
9_99,
9_88,
9_77,
9_66,
9_55,
9_44,
9_33,
9_22,
9_11,
9_00,
8_99,
8_79,
8_59,
8_40,
8_20,
8_00,
7_99,
7_66,
7_33,
7_00,
6_99,
6_50,
6_00,
5_99,
5_00,
4_99,
4_00,
3_99,
3_50,
3_00,
2_99,
2_66,
2_33,
2_00,
1_99,
1_79,
1_59,
1_40,
1_20,
1_00,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
smart100_timesteps = [
9_99,
9_95,
9_92,
9_89,
9_85,
9_81,
9_78,
9_75,
9_71,
9_67,
9_64,
9_61,
9_57,
9_56,
9_51,
9_47,
9_42,
9_37,
9_33,
9_28,
9_23,
9_19,
9_14,
9_13,
9_08,
9_03,
8_97,
8_92,
8_87,
8_81,
8_76,
8_71,
8_70,
8_64,
8_58,
8_52,
8_46,
8_40,
8_34,
8_28,
8_27,
8_20,
8_13,
8_06,
7_99,
7_92,
7_85,
7_84,
7_77,
7_70,
7_63,
7_56,
7_49,
7_42,
7_41,
7_33,
7_24,
7_16,
7_07,
6_99,
6_98,
6_88,
6_77,
6_66,
6_56,
6_55,
6_45,
6_34,
6_23,
6_13,
6_12,
5_98,
5_84,
5_70,
5_69,
5_55,
5_41,
5_27,
5_26,
5_05,
4_84,
4_83,
4_62,
4_40,
4_39,
3_96,
3_95,
3_52,
3_51,
3_08,
3_07,
2_64,
2_63,
2_20,
2_19,
1_76,
1_32,
88,
44,
0,
]
smart185_timesteps = [
9_99,
9_97,
9_95,
9_92,
9_90,
9_88,
9_86,
9_84,
9_81,
9_79,
9_77,
9_75,
9_72,
9_70,
9_68,
9_66,
9_64,
9_61,
9_59,
9_57,
9_56,
9_54,
9_51,
9_49,
9_46,
9_44,
9_41,
9_39,
9_36,
9_34,
9_31,
9_29,
9_26,
9_24,
9_21,
9_19,
9_16,
9_14,
9_13,
9_10,
9_07,
9_05,
9_02,
8_99,
8_96,
8_93,
8_91,
8_88,
8_85,
8_82,
8_79,
8_77,
8_74,
8_71,
8_70,
8_67,
8_64,
8_61,
8_58,
8_55,
8_52,
8_49,
8_46,
8_43,
8_40,
8_37,
8_34,
8_31,
8_28,
8_27,
8_24,
8_21,
8_17,
8_14,
8_11,
8_08,
8_04,
8_01,
7_98,
7_95,
7_91,
7_88,
7_85,
7_84,
7_80,
7_77,
7_74,
7_70,
7_66,
7_63,
7_60,
7_56,
7_52,
7_49,
7_46,
7_42,
7_41,
7_37,
7_33,
7_30,
7_26,
7_22,
7_18,
7_14,
7_10,
7_07,
7_03,
6_99,
6_98,
6_94,
6_90,
6_85,
6_81,
6_77,
6_73,
6_69,
6_64,
6_60,
6_56,
6_55,
6_50,
6_46,
6_41,
6_36,
6_32,
6_27,
6_22,
6_18,
6_13,
6_12,
6_07,
6_02,
5_96,
5_91,
5_86,
5_80,
5_75,
5_70,
5_69,
5_63,
5_57,
5_51,
5_45,
5_39,
5_33,
5_27,
5_26,
5_19,
5_12,
5_05,
4_98,
4_91,
4_84,
4_83,
4_74,
4_66,
4_57,
4_49,
4_40,
4_39,
4_28,
4_18,
4_07,
3_96,
3_95,
3_81,
3_66,
3_52,
3_51,
3_30,
3_08,
3_07,
2_86,
2_64,
2_63,
2_42,
2_20,
2_19,
1_76,
1_75,
1_32,
1_31,
88,
44,
0,
]
super27_timesteps = [
9_99,
9_91,
9_82,
9_74,
9_66,
9_58,
9_50,
9_41,
9_33,
9_25,
9_16,
9_08,
9_00,
8_99,
8_74,
8_50,
8_25,
8_00,
7_99,
7_00,
6_00,
5_00,
4_00,
3_00,
2_00,
1_00,
0,
]
super40_timesteps = [
9_99,
9_92,
9_85,
9_78,
9_71,
9_64,
9_57,
9_49,
9_42,
9_35,
9_28,
9_21,
9_14,
9_07,
9_00,
8_99,
8_79,
8_59,
8_40,
8_20,
8_00,
7_99,
7_66,
7_33,
7_00,
6_99,
6_50,
6_00,
5_99,
5_00,
4_99,
4_00,
3_99,
3_00,
2_99,
2_00,
1_99,
1_00,
99,
0,
]
super100_timesteps = [
9_99,
9_96,
9_92,
9_89,
9_85,
9_82,
9_79,
9_75,
9_72,
9_68,
9_65,
9_61,
9_58,
9_55,
9_51,
9_48,
9_44,
9_41,
9_38,
9_34,
9_31,
9_27,
9_24,
9_20,
9_17,
9_14,
9_10,
9_07,
9_03,
9_00,
8_99,
8_91,
8_84,
8_76,
8_69,
8_61,
8_53,
8_46,
8_38,
8_30,
8_23,
8_15,
8_08,
8_00,
7_99,
7_88,
7_77,
7_66,
7_55,
7_44,
7_33,
7_22,
7_11,
7_00,
6_99,
6_88,
6_77,
6_66,
6_55,
6_44,
6_33,
6_22,
6_11,
6_00,
5_99,
5_85,
5_71,
5_57,
5_42,
5_28,
5_14,
5_00,
4_99,
4_85,
4_71,
4_57,
4_42,
4_28,
4_14,
4_00,
3_99,
3_79,
3_59,
3_40,
3_20,
3_00,
2_99,
2_79,
2_59,
2_40,
2_20,
2_00,
1_99,
1_66,
1_33,
1_00,
99,
66,
33,
0,
]
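# Context (assumption based on the matching diffusers file): these are
# hand-picked denoising timestep schedules for the DeepFloyd-IF pipelines,
# named by family and length. A pipeline can consume one directly instead of a
# step count, e.g. (illustrative):
#   images = pipe(prompt, timesteps=smart100_timesteps).images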
| 704
|
NUMBERS_PLUS_LETTER = 'Input must be a string of 8 numbers plus letter'
LOOKUP_LETTERS = 'TRWAGMYFPDXBNJZSQVHLCKE'


def is_spain_national_id(spanish_id: str) -> bool:
    """Return True if `spanish_id` is a valid Spanish DNI: 8 digits plus a checksum letter."""
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)
    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)
    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex
    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)
    return letter == LOOKUP_LETTERS[number % 23]
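# Worked example: 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z", so
# "12345678Z" (and "12345678-Z") validate, while "12345678A" returns False.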
if __name__ == "__main__":
import doctest
doctest.testmod()
| 510
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
    required_optional_params = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base')
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            'in_channels': 4,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'text_image',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_0, self.block_out_channels_0 * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'text_image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler_kwargs = {
            'num_train_timesteps': 1000,
            'beta_schedule': 'linear',
            'beta_start': 0.00085,
            'beta_end': 0.012,
            'clip_sample': False,
            'set_alpha_to_one': False,
            'steps_offset': 0,
            'prediction_type': 'epsilon',
            'thresholding': False,
        }
        scheduler = DDIMScheduler(**scheduler_kwargs)
        components = {
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((256, 256))
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'horse',
            'image': init_image,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 10,
            'guidance_scale': 7.0,
            'strength': 0.2,
            'output_type': 'np',
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinsky/kandinsky_img2img_frog.npy'
        )
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png'
        )
        prompt = 'A red cartoon frog, 4k'
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1-prior', torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1', torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device='cpu').manual_seed(0)
        image_embeds, zero_image_embeds = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt='',
        ).to_tuple()
        output = pipeline(
            prompt, image=init_image, image_embeds=image_embeds, negative_image_embeds=zero_image_embeds, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type='np',
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
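# Design note: Kandinsky 2.1 is a two-stage system. The prior pipeline maps the
# text prompt to CLIP image embeddings, and the img2img decoder denoises from
# the strength-scaled noised init image conditioned on those embeddings, which
# is why pipe_prior(...).to_tuple() feeds image_embeds / negative_image_embeds
# into the main pipeline above.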
| 66
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase = {
"configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    UpperCamelCase["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    UpperCamelCase["image_processing_convnext"] = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    UpperCamelCase["modeling_convnext"] = [
"CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvNextForImageClassification",
"ConvNextModel",
"ConvNextPreTrainedModel",
"ConvNextBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    UpperCamelCase["modeling_tf_convnext"] = [
"TFConvNextForImageClassification",
"TFConvNextModel",
"TFConvNextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], UpperCamelCase)
| 66
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__lowerCamelCase = {
'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
'processing_layoutlmv2': ['LayoutLMv2Processor'],
'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    __lowerCamelCase['tokenization_layoutlmv2_fast'] = ['LayoutLMv2TokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    __lowerCamelCase['feature_extraction_layoutlmv2'] = ['LayoutLMv2FeatureExtractor']
    __lowerCamelCase['image_processing_layoutlmv2'] = ['LayoutLMv2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    __lowerCamelCase['modeling_layoutlmv2'] = [
'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv2ForQuestionAnswering',
'LayoutLMv2ForSequenceClassification',
'LayoutLMv2ForTokenClassification',
'LayoutLMv2Layer',
'LayoutLMv2Model',
'LayoutLMv2PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 213
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
| 213
| 1
|
def is_isogram(string: str) -> bool:
    '''simple docstring'''
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
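# Illustrative behavior (added examples, matching the checks above):
#   is_isogram("Uncopyrightable") -> True   (no letter repeats)
#   is_isogram("allowance")       -> False  ("a" and "l" repeat)
#   is_isogram("isogram 1")       -> raises ValueError (non-alphabetic characters)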
if __name__ == "__main__":
    input_str = input('Enter a string ').strip()
    isogram = is_isogram(input_str)
print(f'''{input_str} is {"an" if isogram else "not an"} isogram.''')
| 371
|
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 356
| 0
|
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
lowerCAmelCase__ = "https://api.github.com"
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
lowerCAmelCase__ = BASE_URL + "/user"
# https://github.com/settings/tokens
lowerCAmelCase__ = os.environ.get("USER_TOKEN", "")
def SCREAMING_SNAKE_CASE( UpperCamelCase ) -> dict[Any, Any]:
UpperCAmelCase_ : List[str] = {
'Authorization': f"""token {auth_token}""",
'Accept': 'application/vnd.github.v3+json',
}
return requests.get(UpperCamelCase ,headers=UpperCamelCase ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F'{key}: {value}')
else:
raise ValueError("'USER_TOKEN' field cannot be empty.")
| 471
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        node: Node | None = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True
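# A minimal alternative sketch (added, not part of the original API): Floyd's
# two-pointer cycle detection runs in O(n) time and O(1) space, whereas the
# visited-list check in ``has_loop`` above is O(n^2).
def has_loop_floyd(head: Node | None) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node  # advances one node per step
        fast = fast.next_node.next_node  # advances two nodes per step
        if slow is fast:  # the pointers can only meet inside a cycle
            return True
    return False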
if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
| 471
| 1
|
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20

    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
| 244
|
'''simple docstring'''
def set_bit(number: int, position: int) -> int:
    """simple docstring"""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """simple docstring"""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """simple docstring"""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """simple docstring"""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """simple docstring"""
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 244
| 1
|
'''simple docstring'''
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 1000) -> int:
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]
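# For reference (added note): solution() with the default max_perimeter of 1000
# finds the perimeter up to 1000 with the most integer right-triangle solutions
# (Project Euler problem 39); the widely published answer is 840.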
if __name__ == "__main__":
print(f'''Perimeter {solution()} has maximum solutions''')
| 465
|
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48_000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1_024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14_000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output
    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than
                    # max_length+hop_length. In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
| 465
| 1
|
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
| 175
|
def move_tower(height, from_pole, to_pole, with_pole):
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)
def move_disk(from_pole, to_pole):
    print("moving disk from", from_pole, "to", to_pole)
def main():
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")
if __name__ == "__main__":
main()
| 214
| 0
|
"""simple docstring"""
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    """simple docstring"""
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    """simple docstring"""
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
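# Tracing one key through the substitutions above (added, illustrative):
#   "visual_encoder.blocks.0.attn.qkv.weight"
#   -> "vision_model.encoder.layers.0.self_attn.qkv.weight"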
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    """simple docstring"""
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )
    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids
    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"

    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"
    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    args = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 74
|
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    """simple docstring"""
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    """simple docstring"""
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    """simple docstring"""
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 74
| 1
|
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("""backbone.downsample_layers.0.0.weight""", """backbone.embeddings.patch_embeddings.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.0.bias""", """backbone.embeddings.patch_embeddings.bias""") )
rename_keys.append(("""backbone.downsample_layers.0.1.weight""", """backbone.embeddings.layernorm.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.1.bias""", """backbone.embeddings.layernorm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.stages.{i}.{j}.gamma''', F'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((F'''backbone.downsample_layers.{i}.0.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.0.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
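# For example (added note), with the tiny config the first stage entry produced above is
# ("backbone.stages.0.0.gamma", "backbone.encoder.stages.0.layers.0.layer_scale_parameter").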
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
if model_name == "upernet-convnext-tiny":
lowerCamelCase__ = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
lowerCamelCase__ = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
lowerCamelCase__ = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
lowerCamelCase__ = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
lowerCamelCase__ = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , __lowerCAmelCase , atol=1e-4 )
print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-convnext-tiny',
type=str,
choices=[F'upernet-convnext-{size}' for size in ['tiny', 'small', 'base', 'large', 'xlarge']],
help='Name of the ConvNext UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 50
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 358
| 0
|
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
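# A typical single-machine invocation (added sketch; the script filename is
# assumed, but `accelerate config`, `accelerate launch`, and the
# `--with_tracking` flag defined below are real):
#   accelerate config                              # one-time interactive setup
#   accelerate launch ./tracking.py --with_tracking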
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    '''simple docstring'''
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowercase = config["""lr"""]
__lowercase = int(config["""num_epochs"""] )
__lowercase = int(config["""seed"""] )
__lowercase = int(config["""batch_size"""] )
set_seed(__UpperCamelCase )
__lowercase , __lowercase = get_dataloaders(__UpperCamelCase , __UpperCamelCase )
__lowercase = evaluate.load("""glue""" , """mrpc""" )
# If the batch size is too big we use gradient accumulation
__lowercase = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
__lowercase = batch_size // MAX_GPU_BATCH_SIZE
__lowercase = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)
# Now we train the model
    for epoch in range(num_epochs):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
            outputs = model(**batch)
            loss = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
        for step, batch in enumerate(eval_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
# Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 339
|
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    '''simple docstring'''
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]
    return out_tensor.tolist()
def is_punctuation(char):
    '''simple docstring'''
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
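# Quick sanity checks (added, illustrative): is_punctuation(",") -> True,
# is_punctuation("^") -> True (code point 94 falls in 91..96, even though its
# Unicode category is "Sk"), is_punctuation("a") -> False.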
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors would fail here because the labels are not yet padded.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
| 339
| 1
|
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image):
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class __lowercase ( unittest.TestCase ):
model_mapping = dict(
    (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
)
tf_model_mapping = dict(
    (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
)
def get_test_pipeline(self, model, tokenizer, processor):
    image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
    return image_segmenter, [
        "./tests/fixtures/tests_samples/COCO/000000039769.png",
        "./tests/fixtures/tests_samples/COCO/000000039769.png",
    ]
def run_pipeline_test(self, mask_generator, examples):
    pass
@require_tf
@unittest.skip("""Image segmentation not implemented in TF""" )
def test_small_model_tf(self):
    pass
@slow
@require_torch
def test_small_model_pt(self):
    image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
    outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)
    # Shortening by hashing
    new_output = []
    for i, o in enumerate(outputs["masks"]):
        new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
# fmt: off
    self.assertEqual(
        nested_simplify(new_output, decimals=4), [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0_4_4_4},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0_2_1},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0_1_6_7},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0_1_3_2},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0_0_5_3},
{"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (480, 640)}, """scores""": 0.9_9_6_7},
{"""mask""": {"""hash""": """453c7844bd""", """shape""": (480, 640)}, """scores""": 0.9_9_3},
{"""mask""": {"""hash""": """3d44f2926d""", """shape""": (480, 640)}, """scores""": 0.9_9_0_9},
{"""mask""": {"""hash""": """64033ddc3f""", """shape""": (480, 640)}, """scores""": 0.9_8_7_9},
{"""mask""": {"""hash""": """801064ff79""", """shape""": (480, 640)}, """scores""": 0.9_8_3_4},
{"""mask""": {"""hash""": """6172f276ef""", """shape""": (480, 640)}, """scores""": 0.9_7_1_6},
{"""mask""": {"""hash""": """b49e60e084""", """shape""": (480, 640)}, """scores""": 0.9_6_1_2},
{"""mask""": {"""hash""": """a811e775fd""", """shape""": (480, 640)}, """scores""": 0.9_5_9_9},
{"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (480, 640)}, """scores""": 0.9_5_5_2},
{"""mask""": {"""hash""": """9d8257e080""", """shape""": (480, 640)}, """scores""": 0.9_5_3_2},
{"""mask""": {"""hash""": """32de6454a8""", """shape""": (480, 640)}, """scores""": 0.9_5_1_6},
{"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (480, 640)}, """scores""": 0.9_4_9_9},
{"""mask""": {"""hash""": """3c6db475fb""", """shape""": (480, 640)}, """scores""": 0.9_4_8_3},
{"""mask""": {"""hash""": """c290813fb9""", """shape""": (480, 640)}, """scores""": 0.9_4_6_4},
{"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (480, 640)}, """scores""": 0.9_4_3},
{"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (480, 640)}, """scores""": 0.9_4_3},
{"""mask""": {"""hash""": """c749b25868""", """shape""": (480, 640)}, """scores""": 0.9_4_0_8},
{"""mask""": {"""hash""": """efb6cab859""", """shape""": (480, 640)}, """scores""": 0.9_3_3_5},
{"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (480, 640)}, """scores""": 0.9_3_2_6},
{"""mask""": {"""hash""": """788b798e24""", """shape""": (480, 640)}, """scores""": 0.9_2_6_2},
{"""mask""": {"""hash""": """abea804f0e""", """shape""": (480, 640)}, """scores""": 0.8_9_9_9},
{"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (480, 640)}, """scores""": 0.8_9_8_6},
{"""mask""": {"""hash""": """cd24047c8a""", """shape""": (480, 640)}, """scores""": 0.8_9_8_4},
{"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (480, 640)}, """scores""": 0.8_8_7_3},
{"""mask""": {"""hash""": """b5f47c9191""", """shape""": (480, 640)}, """scores""": 0.8_8_7_1}
] ,)
# fmt: on
@require_torch
@slow
def test_threshold(self):
    model_id = "facebook/sam-vit-huge"
    image_segmenter = pipeline("mask-generation", model=model_id)
    outputs = image_segmenter(
        "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
    )
    # Shortening by hashing
    new_output = []
    for i, o in enumerate(outputs["masks"]):
        new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
    self.assertEqual(
        nested_simplify(new_output, decimals=4), [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0_4_4_4},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0_2_1_0},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0_1_6_7},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0_1_3_2},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0_0_5_3},
] ,)
| 65
|
"""simple docstring"""
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    # attribute targets below follow the upstream s3prl conversion script
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
args = parser.parse_args()
convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 650
| 0
|
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""",
}
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
| 547
|
import os
def solution():
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
if __name__ == "__main__":
print(solution())
| 547
| 1
|
"""simple docstring"""
import heapq
import sys
import numpy as np
A_ : Optional[Any] = tuple[int, int]
class lowerCamelCase :
def __init__( self : Dict ) -> int:
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = set()
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
if not self.empty():
return self.elements[0][0]
else:
return float("""inf""" )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
return len(self.elements ) == 0
def SCREAMING_SNAKE_CASE ( self : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : Any ) -> Optional[Any]:
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(__UpperCAmelCase )
else:
# update
# print("update", item)
SCREAMING_SNAKE_CASE__ = []
((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def SCREAMING_SNAKE_CASE ( self : int , __UpperCAmelCase : str ) -> Union[str, Any]:
if item in self.set:
self.set.remove(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = []
((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
return self.elements[0][1]
def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) = heapq.heappop(self.elements )
self.set.remove(__UpperCAmelCase )
return (priority, item)
def A ( snake_case__ , snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = np.array(snake_case__ )
SCREAMING_SNAKE_CASE__ = np.array(snake_case__ )
return np.linalg.norm(a - b )
def A ( snake_case__ , snake_case__ ):
'''simple docstring'''
return consistent_heuristic(snake_case__ , snake_case__ ) // t
def A ( snake_case__ , snake_case__ ):
'''simple docstring'''
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def A ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = g_function[start] + Wa * heuristics[i](snake_case__ , snake_case__ )
return ans
def A ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = np.chararray((n, n) )
for i in range(snake_case__ ):
for j in range(snake_case__ ):
SCREAMING_SNAKE_CASE__ = """*"""
for i in range(snake_case__ ):
for j in range(snake_case__ ):
if (j, (n - 1) - i) in blocks:
SCREAMING_SNAKE_CASE__ = """#"""
SCREAMING_SNAKE_CASE__ = """-"""
SCREAMING_SNAKE_CASE__ = back_pointer[goal]
while x != start:
((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) = x
# print(x)
SCREAMING_SNAKE_CASE__ = """-"""
SCREAMING_SNAKE_CASE__ = back_pointer[x]
SCREAMING_SNAKE_CASE__ = """-"""
for i in range(snake_case__ ):
for j in range(snake_case__ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=""" """ )
print("""<-- End position""" , end=""" """ )
else:
print(grid[i][j] , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
print("""PATH TAKEN BY THE ALGORITHM IS:-""" )
SCREAMING_SNAKE_CASE__ = back_pointer[goal]
while x != start:
print(snake_case__ , end=""" """ )
SCREAMING_SNAKE_CASE__ = back_pointer[x]
print(snake_case__ )
sys.exit()
def A ( snake_case__ ):
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def A ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
for itera in range(snake_case__ ):
open_list[itera].remove_element(snake_case__ )
# print("s", s)
# print("j", j)
((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) = s
SCREAMING_SNAKE_CASE__ = (x - 1, y)
SCREAMING_SNAKE_CASE__ = (x + 1, y)
SCREAMING_SNAKE_CASE__ = (x, y + 1)
SCREAMING_SNAKE_CASE__ = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(snake_case__ ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(snake_case__ )
SCREAMING_SNAKE_CASE__ = -1
SCREAMING_SNAKE_CASE__ = float("""inf""" )
if valid(snake_case__ ) and g_function[neighbours] > g_function[s] + 1:
SCREAMING_SNAKE_CASE__ = g_function[s] + 1
SCREAMING_SNAKE_CASE__ = s
if neighbours not in close_list_anchor:
open_list[0].put(snake_case__ , key(snake_case__ , 0 , snake_case__ , snake_case__ ) )
if neighbours not in close_list_inad:
for var in range(1 , snake_case__ ):
if key(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) <= Wa * key(
snake_case__ , 0 , snake_case__ , snake_case__ ):
open_list[j].put(
snake_case__ , key(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) )
def A ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
A_ : Optional[int] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
A_ : Optional[Any] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
A_ : List[str] = make_common_ground()
A_ : Optional[Any] = blocks_blk
# hyper parameters
A_ : str = 1
A_ : List[str] = 1
A_ : Tuple = 20
A_ : Union[str, Any] = 3 # one consistent and two other inconsistent
# start and end destination
A_ : Optional[Any] = (0, 0)
A_ : Dict = (n - 1, n - 1)
A_ : Any = 1
def A ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = {start: 0, goal: float("""inf""" )}
SCREAMING_SNAKE_CASE__ = {start: -1, goal: -1}
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = set()
for i in range(snake_case__ ):
open_list.append(PriorityQueue() )
open_list[i].put(snake_case__ , key(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) )
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
while open_list[0].minkey() < float("""inf""" ):
for i in range(1 , snake_case__ ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("""inf""" ):
do_something(snake_case__ , snake_case__ , snake_case__ )
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = open_list[i].top_show()
visited.add(snake_case__ )
expand_state(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , )
close_list_inad.append(snake_case__ )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("""inf""" ):
do_something(snake_case__ , snake_case__ , snake_case__ )
else:
SCREAMING_SNAKE_CASE__ = open_list[0].top_show()
visited.add(snake_case__ )
expand_state(
snake_case__ , 0 , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , )
close_list_anchor.append(snake_case__ )
print("""No path found to goal""" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(snake_case__ ):
if (j, i) in blocks:
print("""#""" , end=""" """ )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("""*""" , end=""" """ )
else:
print("""-""" , end=""" """ )
else:
print("""*""" , end=""" """ )
if (j, i) == (n - 1, n - 1):
print("""<-- End position""" , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 196
|
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


# Visualizing the Polynomial Regression results
def viz_polynomial():
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polynomial()

    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
| 196
| 1
|
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary."""
    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
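# Shape note (added; not part of the original converter): a PyTorch conv weight of shape
# (out_channels, in_channels, kH, kW) becomes a Flax kernel of shape
# (kH, kW, in_channels, out_channels) via the (2, 3, 1, 0) transpose above, and a linear
# weight (out, in) is simply transposed to (in, out).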
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
| 131
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
"""simple docstring"""
def __init__(
    self,
    parent,
    batch_size=7,
    num_channels=3,
    image_size=18,
    min_resolution=30,
    max_resolution=400,
    do_resize=True,
    size=None,
    do_center_crop=True,
    crop_size=None,
    do_flip_channel_order=True,
):
    size = size if size is not None else {"shortest_edge": 20}
    crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
    self.parent = parent
    self.batch_size = batch_size
    self.num_channels = num_channels
    self.image_size = image_size
    self.min_resolution = min_resolution
    self.max_resolution = max_resolution
    self.do_resize = do_resize
    self.size = size
    self.do_center_crop = do_center_crop
    self.crop_size = crop_size
    self.do_flip_channel_order = do_flip_channel_order
def prepare_image_processor_dict(self):
    return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
"""simple docstring"""
image_processing_class = MobileViTImageProcessor if is_vision_available() else None

def setUp(self):
    self.image_processor_tester = MobileViTImageProcessingTester(self)

@property
def image_processor_dict(self):
    return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , '''do_resize''' ) )
self.assertTrue(hasattr(_A , '''size''' ) )
self.assertTrue(hasattr(_A , '''do_center_crop''' ) )
self.assertTrue(hasattr(_A , '''center_crop''' ) )
self.assertTrue(hasattr(_A , '''do_flip_channel_order''' ) )
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
__SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
pass
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
__SCREAMING_SNAKE_CASE : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__SCREAMING_SNAKE_CASE : Any = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__SCREAMING_SNAKE_CASE : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
__SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__SCREAMING_SNAKE_CASE : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
__SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__SCREAMING_SNAKE_CASE : int = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 131
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
A__ : Tuple = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Union[str, Any] = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[Any] = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 171
|
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
A__ : List[str] = logging.get_logger(__name__)
@add_end_docstrings(
UpperCamelCase_ ,R'''
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
''' ,)
class __snake_case ( UpperCamelCase_ ):
def UpperCAmelCase__ ( self : Optional[Any] , A_ : GenericTensor):
if self.framework == "tf":
lowerCAmelCase_ : Dict = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
elif self.framework == "pt":
lowerCAmelCase_ : List[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=A_)
else:
raise ValueError('''Unsupported framework''')
return masked_index
def UpperCAmelCase__ ( self : Tuple , A_ : GenericTensor):
lowerCAmelCase_ : List[str] = self.get_masked_index(A_)
lowerCAmelCase_ : Union[str, Any] = np.prod(masked_index.shape)
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , F"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def UpperCAmelCase__ ( self : str , A_ : GenericTensor):
if isinstance(A_ , A_):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0])
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(A_)
def UpperCAmelCase__ ( self : Optional[Any] , A_ : Union[str, Any] , A_ : Optional[int]=None , **A_ : List[str]):
if return_tensors is None:
lowerCAmelCase_ : Optional[int] = self.framework
lowerCAmelCase_ : Optional[Any] = self.tokenizer(A_ , return_tensors=A_)
self.ensure_exactly_one_mask_token(A_)
return model_inputs
def UpperCAmelCase__ ( self : List[str] , A_ : str):
lowerCAmelCase_ : Union[str, Any] = self.model(**A_)
lowerCAmelCase_ : List[str] = model_inputs['''input_ids''']
return model_outputs
def UpperCAmelCase__ ( self : str , A_ : str , A_ : str=5 , A_ : int=None):
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
lowerCAmelCase_ : int = target_ids.shape[0]
lowerCAmelCase_ : List[Any] = model_outputs['''input_ids'''][0]
lowerCAmelCase_ : int = model_outputs['''logits''']
if self.framework == "tf":
lowerCAmelCase_ : Union[str, Any] = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]
lowerCAmelCase_ : Optional[Any] = outputs.numpy()
lowerCAmelCase_ : List[str] = outputs[0, masked_index, :]
lowerCAmelCase_ : List[Any] = stable_softmax(A_ , axis=-1)
if target_ids is not None:
lowerCAmelCase_ : str = tf.gather_nd(tf.squeeze(A_ , 0) , target_ids.reshape(-1 , 1))
lowerCAmelCase_ : Any = tf.expand_dims(A_ , 0)
lowerCAmelCase_ : List[Any] = tf.math.top_k(A_ , k=A_)
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = topk.values.numpy(), topk.indices.numpy()
else:
lowerCAmelCase_ : Optional[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=A_).squeeze(-1)
# Fill mask pipeline supports only one ${mask_token} per sample
lowerCAmelCase_ : Dict = outputs[0, masked_index, :]
lowerCAmelCase_ : Dict = logits.softmax(dim=-1)
if target_ids is not None:
lowerCAmelCase_ : str = probs[..., target_ids]
lowerCAmelCase_ , lowerCAmelCase_ : int = probs.topk(A_)
lowerCAmelCase_ : Union[str, Any] = []
lowerCAmelCase_ : Optional[int] = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist())):
lowerCAmelCase_ : int = []
for v, p in zip(_values , _predictions):
# Copy is important since we're going to modify this array in place
lowerCAmelCase_ : Dict = input_ids.numpy().copy()
if target_ids is not None:
lowerCAmelCase_ : str = target_ids[p].tolist()
lowerCAmelCase_ : List[Any] = p
# Filter padding out:
lowerCAmelCase_ : Tuple = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
lowerCAmelCase_ : Any = self.tokenizer.decode(A_ , skip_special_tokens=A_)
lowerCAmelCase_ : str = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p]), '''sequence''': sequence}
row.append(A_)
result.append(A_)
if single_mask:
return result[0]
return result
def UpperCAmelCase__ ( self : int , A_ : Any , A_ : List[Any]=None):
if isinstance(A_ , A_):
lowerCAmelCase_ : List[str] = [targets]
try:
lowerCAmelCase_ : Union[str, Any] = self.tokenizer.get_vocab()
except Exception:
lowerCAmelCase_ : str = {}
lowerCAmelCase_ : Any = []
for target in targets:
lowerCAmelCase_ : List[str] = vocab.get(A_ , A_)
if id_ is None:
lowerCAmelCase_ : Optional[int] = self.tokenizer(
A_ , add_special_tokens=A_ , return_attention_mask=A_ , return_token_type_ids=A_ , max_length=1 , truncation=A_ , )['''input_ids''']
if len(A_) == 0:
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
'''We cannot replace it with anything meaningful, ignoring it''')
continue
lowerCAmelCase_ : Union[str, Any] = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`.""")
target_ids.append(id_)
lowerCAmelCase_ : List[str] = list(set(A_))
if len(A_) == 0:
raise ValueError('''At least one target must be provided when passed.''')
lowerCAmelCase_ : Tuple = np.array(A_)
return target_ids
def UpperCAmelCase__ ( self : List[Any] , A_ : Optional[int]=None , A_ : Tuple=None):
lowerCAmelCase_ : int = {}
if targets is not None:
lowerCAmelCase_ : Optional[Any] = self.get_target_ids(A_ , A_)
lowerCAmelCase_ : str = target_ids
if top_k is not None:
lowerCAmelCase_ : int = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''')
return {}, {}, postprocess_params
def __call__( self : str , A_ : Tuple , *A_ : Dict , **A_ : Optional[Any]):
lowerCAmelCase_ : Tuple = super().__call__(A_ , **A_)
if isinstance(A_ , A_) and len(A_) == 1:
return outputs[0]
return outputs
| 171
| 1
|
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")
class A_ ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec('transformers' ) is not None
class A_ ( unittest.TestCase ):
"""simple docstring"""
@unittest.mock.patch('sys.stdout' ,new_callable=io.StringIO )
def __UpperCAmelCase ( self : Optional[int] ,__A : List[str] ) -> Optional[int]:
with ContextManagers([] ):
print('Transformers are awesome!' )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() ,'Transformers are awesome!\n' )
@unittest.mock.patch('sys.stdout' ,new_callable=io.StringIO )
def __UpperCAmelCase ( self : Optional[int] ,__A : Tuple ) -> List[Any]:
with ContextManagers([context_en()] ):
print('Transformers are awesome!' )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() ,'Welcome!\nTransformers are awesome!\nBye!\n' )
@unittest.mock.patch('sys.stdout' ,new_callable=io.StringIO )
def __UpperCAmelCase ( self : int ,__A : List[Any] ) -> int:
with ContextManagers([context_fr(), context_en()] ):
print('Transformers are awesome!' )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() ,'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n' )
@require_torch
def test_find_labels_pt(self):
    self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
    self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
    self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

    class DummyModel(BertForSequenceClassification):
        pass

    self.assertEqual(find_labels(DummyModel), ["labels"])

@require_tf
def test_find_labels_tf(self):
    self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
    self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
    self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

    class DummyModel(TFBertForSequenceClassification):
        pass

    self.assertEqual(find_labels(DummyModel), ["labels"])

@require_flax
def test_find_labels_flax(self):
    # Flax models don't have labels
    self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
    self.assertEqual(find_labels(FlaxBertForPreTraining), [])
    self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

    class DummyModel(FlaxBertForSequenceClassification):
        pass

    self.assertEqual(find_labels(DummyModel), [])
| 704
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 535
| 0
|
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]
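# Illustrative sanity check (added; not part of the original Project Euler script): the
# 6k +/- 1 trial division above must reproduce the small primes exactly.
assert [n for n in range(2, 30) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]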
if __name__ == "__main__":
print(f"""{solution() = }""")
| 600
|
import pprint
import requests
API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:  # name inferred from the "/today" endpoint
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()
if __name__ == "__main__":
response = random_quotes()
pprint.pprint(response)
| 600
| 1
|
'''simple docstring'''
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device=None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
| 721
|
'''simple docstring'''
from collections import deque
def tarjan(g):
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n, edges):
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
# Test
_lowerCamelCase = 7
_lowerCamelCase = [0, 0, 1, 2, 3, 3, 4, 4, 6]
_lowerCamelCase = [1, 3, 2, 0, 1, 4, 5, 6, 5]
_lowerCamelCase = [(u, v) for u, v in zip(source, target)]
_lowerCamelCase = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 323
| 0
|
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
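# Note (added): the update above is plain batch gradient descent on the binary cross-entropy:
# for J(theta) = -mean(y * log(h) + (1 - y) * log(1 - h)) with h = sigmoid(X @ theta), the
# gradient works out to X.T @ (h - y) / m, which is exactly the `gradient` in the loop.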
if __name__ == "__main__":
lowerCAmelCase__: Union[str, Any] = datasets.load_iris()
lowerCAmelCase__: List[Any] = iris.data[:, :2]
lowerCAmelCase__: Union[str, Any] = (iris.target != 0) * 1
lowerCAmelCase__: Dict = 0.1
lowerCAmelCase__: Optional[Any] = logistic_reg(alpha, x, y, max_iterations=7_0000)
print("theta: ", theta) # printing the theta i.e our weights vector
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
return sigmoid_function(
np.dot(_UpperCamelCase , _UpperCamelCase ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
(lowerCAmelCase__): Tuple = (x[:, 0].min(), x[:, 0].max())
(lowerCAmelCase__): Optional[int] = (x[:, 1].min(), x[:, 1].max())
(lowerCAmelCase__): Union[str, Any] = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
lowerCAmelCase__: Optional[Any] = np.c_[xxa.ravel(), xxa.ravel()]
lowerCAmelCase__: str = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors="black")
plt.legend()
plt.show()
| 345
|
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "test",
            "question",
            "this",
            "is",
            "the",
            "first",
            "second",
            "third",
            "fourth",
            "fifth",
            "record",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)

    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset

    def get_dummy_block_records(self):
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever

    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )
        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )

    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )
        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)

    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
| 138
| 0
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs):
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
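# Hedged usage sketch (added; BertConfig is illustrative -- any two configs whose decoder
# supports cross-attention work the same way):
#
#   from transformers import BertConfig
#   config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
#   assert config.decoder.is_decoder and config.decoder.add_cross_attention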
| 706
|
"""simple docstring"""
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]

    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
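# Note (added): the ".eZt8xd" CSS selector targets one of Google's machine-generated class
# names, which change without notice, so this scrape is inherently brittle; Google may also
# rate-limit or block automated requests despite the randomized User-Agent header.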
| 261
| 0
|
'''simple docstring'''
def merge_sort(collection):
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
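# Note (added): despite its name this is a min/max selection sort, not merge sort -- each
# pass extracts the current minimum and maximum, and list.remove() is itself O(n), giving
# O(n^2) overall versus merge sort's O(n log n).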
| 314
|
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
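# Note (added): swapping the module object in sys.modules for a _LazyModule defers the heavy
# torch-dependent imports until an attribute such as CpmAntModel is first accessed, keeping
# `import transformers` fast when CPM-Ant is never used.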
| 495
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
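# Note (added): the ONNX export config above declares dynamic batch/channel/height/width
# axes for `pixel_values`, pins the default opset to 12, requires torch >= 1.11, and
# validates the exported graph against the eager model with an absolute tolerance of 1e-5.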
| 117
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            torchscript=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            fp16=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            fp16=True,
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 117
| 1
|
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)

    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)
@slow
    def test_tokenizer_integration(self):
        sequences = [
'<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
'Hey there, how are you doing this fine day?',
'This is a text with a trailing spaces followed by a dot .',
'Häj sväjs lillebrör! =)',
'Det är inget fel på Mr. Cool',
]
# fmt: off
_snake_case = {'input_ids': [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_snake_case, model_name="AI-Sweden/gpt-sw3-126m", sequences=sequences
        )
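# Note (added): the "<0xC3>" and "<0xA9>" pieces asserted above are the two raw UTF-8 bytes
# of "é" -- SentencePiece's byte-fallback path, used when a character is missing from the
# 2000-entry fixture vocabulary, just as "<0x39>" stands in for the digit "9".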
| 686
|
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetEmbeddings(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding


class ResNetShortCut(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class ResNetBasicLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetBottleNeckLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetStage(nn.Module):
    def __init__(
        self,
        config: ResNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ):
        super().__init__()

        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer

        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )


class ResNetPreTrainedModel(PreTrainedModel):
    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value


RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)


@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
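# Note (added): `config.layer_type` selects between the two residual blocks above --
# ResNetBasicLayer (two 3x3 convs, used by ResNet-18/34) and ResNetBottleNeckLayer
# (1x1 -> 3x3 -> 1x1 with channels reduced by `reduction=4`, used by ResNet-50 and deeper);
# only the first layer of each stage downsamples, via its stride.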
| 686
| 1
|
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
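# Note (added): unlike BERT, the Funnel tokenizer reserves a dedicated token type id (2) for
# the [CLS] token, which is exactly what the `[2] + [0] * sentence_len` assertions verify.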
| 702
|
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    max_face_number = sides_number
    max_total = max_face_number * dice_number

    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4, dice_number=9
    )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6, dice_number=6
    )

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability


if __name__ == "__main__":
    print(f"{solution() = }")
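# Note (added): the brute-force enumeration visits 4**9 = 262144 outcomes for Peter's dice
# and 6**6 = 46656 for Colin's; the resulting win probability, 0.5731441, is the accepted
# answer to Project Euler problem 205.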
| 234
| 0
|
'''simple docstring'''
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:
    """simple docstring"""
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        message = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(message)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: str) -> bytes:
    """simple docstring"""
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        message = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(message)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(data)
if __name__ == "__main__":
import doctest
doctest.testmod()
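# Hedged usage sketch (added; outputs verifiable against the standard library's base64):
#
#   >>> base64_encode(b"Python")
#   b'UHl0aG9u'
#   >>> base64_decode("UHl0aG9u")
#   b'Python'
#   >>> import base64; base64.b64encode(b"Python")
#   b'UHl0aG9u'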
| 448
|
'''simple docstring'''
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : int = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
__a =["input_ids", "attention_mask"]
def __init__( self , lowerCamelCase="</s>" , lowerCamelCase="<unk>" , lowerCamelCase="<pad>" , lowerCamelCase=125 , lowerCamelCase=None , **lowerCamelCase , ) ->None:
'''simple docstring'''
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
__a = [F"""<extra_id_{i}>""" for i in range(lowerCamelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
__a = len(set(filter(lambda lowerCamelCase : bool('extra_id' in str(lowerCamelCase ) ) , lowerCamelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the'
' extra_ids tokens' )
__a = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else pad_token
__a = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else eos_token
__a = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else unk_token
super().__init__(
eos_token=lowerCamelCase , unk_token=lowerCamelCase , pad_token=lowerCamelCase , extra_ids=lowerCamelCase , additional_special_tokens=lowerCamelCase , **lowerCamelCase , )
__a = extra_ids
__a = 2**8 # utf is 8 bits
# define special tokens dict
__a = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
__a = len(self.special_tokens_encoder )
__a = len(lowerCamelCase )
for i, token in enumerate(lowerCamelCase ):
__a = self.vocab_size + i - n
__a = {v: k for k, v in self.special_tokens_encoder.items()}
    @property
    def vocab_size(self) -> int:
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        """Do not add eos again if the user already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of single-character byte tokens."""
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings) to a single decoded string."""
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.added_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    # this tokenizer has no vocabulary file to save
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        return ()
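    # Usage sketch (added for illustration, not part of the original file): the
    # byte-level scheme above maps each UTF-8 byte to a single-character token via
    # chr()/ord(), shifted by the 3 special tokens defined in __init__, so ids are
    # reversible without any vocabulary file:
    #
    #   tokens = [chr(i) for i in "hi".encode("utf-8")]   # ['h', 'i']
    #   ids = [ord(t) + 3 for t in tokens]                # [107, 108]
    #   bytes(i - 3 for i in ids).decode("utf-8")         # 'hi'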
| 448
| 1
|
def and_gate(input_a: int, input_b: int) -> int:
    """Return 1 only if both inputs are 1, emulating a logical AND gate."""
    return int((input_a, input_b).count(0) == 0)


def test_and_gate() -> None:
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 346
|
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path) -> None:
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
lowerCamelCase :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCamelCase :Tuple = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
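# Programmatic usage sketch (added for illustration; the paths below are
# hypothetical placeholders, not files shipped with this script):
#
#   convert_tf_checkpoint_to_pytorch(
#       "models/bert_model.ckpt",     # TF checkpoint prefix
#       "models/bert_config.json",    # matching BERT config
#       "models/pytorch_model.bin",   # output path
#   )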
| 346
| 1
|
'''simple docstring'''
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """Return the maximum sum of any k consecutive elements of array."""
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
lowerCamelCase = [randint(-1000, 1000) for i in range(100)]
lowerCamelCase = randint(0, 110)
print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}")
| 474
|
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
| 437
| 0
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    """`FeatureType` for translations with a fixed set of languages per example."""

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    """`FeatureType` for translations with a variable set of languages per example."""

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
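# Usage sketch (added for illustration; mirrors the example in the datasets docs).
# Per-language lists are flattened into parallel columns sorted by language code:
#
#   feature = TranslationVariableLanguages(languages=["en", "fr"])
#   feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
#   # -> {"language": ("en", "fr", "fr"), "translation": ("the cat", "la chatte", "le chat")}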
| 595
|
"""simple docstring"""
from collections import defaultdict
def dfs(start: int) -> int:
    """Return the size of the subtree rooted at start, recording even-sized subtrees in cuts."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    """
    2 1, 3 1, 4 3, 5 2, 6 1, 7 2, 8 6, 9 8, 10 8
    On removing edges (1,3) and (1,6), we can get the desired result 2.
    """
    dfs(1)
if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
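    # For the sample edges above, dfs from node 1 finds even-sized subtrees rooted
    # at 3 (size 2) and 6 (size 4), plus the whole tree at 1, so cuts == [3, 6, 1]
    # and the script prints len(cuts) - 1 == 2: the maximum number of edges that
    # can be removed while keeping every resulting component even-sized.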
| 595
| 1
|
"""simple docstring"""
from manim import *
class _A ( lowerCAmelCase ):
def A__ ( self ):
"""simple docstring"""
lowercase = Rectangle(height=0.5 , width=0.5 )
lowercase = Rectangle(height=0.2_5 , width=0.2_5 )
lowercase = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
lowercase = [mem.copy() for i in range(6 )]
lowercase = [mem.copy() for i in range(6 )]
lowercase = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase = VGroup(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase = Text("""CPU""" , font_size=24 )
lowercase = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowerCAmelCase )
lowercase = [mem.copy() for i in range(4 )]
lowercase = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase = Text("""GPU""" , font_size=24 )
lowercase = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(__lowerCAmelCase )
lowercase = [mem.copy() for i in range(6 )]
lowercase = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase = Text("""Model""" , font_size=24 )
lowercase = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(__lowerCAmelCase )
lowercase = []
lowercase = []
lowercase = []
for i, rect in enumerate(__lowerCAmelCase ):
rect.set_stroke(__lowerCAmelCase )
lowercase = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(__lowerCAmelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=__lowerCAmelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=__lowerCAmelCase , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=__lowerCAmelCase , buff=0.0 )
self.add(__lowerCAmelCase )
model_cpu_arr.append(__lowerCAmelCase )
self.add(*__lowerCAmelCase , *__lowerCAmelCase , *__lowerCAmelCase )
lowercase = [mem.copy() for i in range(6 )]
lowercase = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase = Text("""Loaded Checkpoint""" , font_size=24 )
lowercase = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
checkpoint.move_to([3, 0.5, 0] )
self.add(__lowerCAmelCase )
lowercase = []
lowercase = []
for i, rect in enumerate(__lowerCAmelCase ):
lowercase = fill.copy().set_fill(__lowerCAmelCase , opacity=0.7 )
target.move_to(__lowerCAmelCase )
ckpt_arr.append(__lowerCAmelCase )
lowercase = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(__lowerCAmelCase )
self.add(*__lowerCAmelCase , *__lowerCAmelCase )
lowercase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowercase = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__lowerCAmelCase , __lowerCAmelCase )
lowercase = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(__lowerCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__lowerCAmelCase )
lowercase = MarkupText(
f'Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
lowercase = [meta_mem.copy() for i in range(6 )]
lowercase = [meta_mem.copy() for i in range(6 )]
lowercase = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase = VGroup(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase = Text("""Disk""" , font_size=24 )
lowercase = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
disk.move_to([-4.0, -1.2_5, 0] )
self.play(Write(__lowerCAmelCase , run_time=3 ) , Write(__lowerCAmelCase , run_time=1 ) , Create(__lowerCAmelCase , run_time=1 ) )
lowercase = []
for i, rect in enumerate(__lowerCAmelCase ):
lowercase = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(__lowerCAmelCase , run_time=1.5 ) )
self.play(*__lowerCAmelCase )
self.play(FadeOut(__lowerCAmelCase ) )
lowercase = MarkupText(f'Then, the checkpoint is removed from memory\nthrough garbage collection.' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowerCAmelCase , run_time=3 ) )
self.play(
FadeOut(__lowerCAmelCase , __lowerCAmelCase , *__lowerCAmelCase , *__lowerCAmelCase ) , )
self.wait()
| 359
|
"""simple docstring"""
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
__lowerCAmelCase : str =get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
__lowerCAmelCase : Tuple =get_tests_dir("""fixtures/vocab.json""")
__lowerCAmelCase : Tuple =get_tests_dir("""fixtures""")
class _A ( unittest.TestCase ):
snake_case__ : List[Any] = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
def A__ ( self ):
"""simple docstring"""
lowercase = 0
def A__ ( self ):
"""simple docstring"""
lowercase = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase = WavaVecaConfig()
lowercase = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
# save in new folder
model_config.save_pretrained(__lowerCAmelCase )
processor.save_pretrained(__lowerCAmelCase )
lowercase = AutoProcessor.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(__lowerCAmelCase , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
copyfile(__lowerCAmelCase , os.path.join(__lowerCAmelCase , """vocab.json""" ) )
lowercase = AutoProcessor.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase = WavaVecaFeatureExtractor()
lowercase = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
lowercase = WavaVecaProcessor(__lowerCAmelCase , __lowerCAmelCase )
# save in new folder
processor.save_pretrained(__lowerCAmelCase )
# drop `processor_class` in tokenizer
with open(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , """r""" ) as f:
lowercase = json.load(__lowerCAmelCase )
config_dict.pop("""processor_class""" )
with open(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , """w""" ) as f:
f.write(json.dumps(__lowerCAmelCase ) )
lowercase = AutoProcessor.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase = WavaVecaFeatureExtractor()
lowercase = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
lowercase = WavaVecaProcessor(__lowerCAmelCase , __lowerCAmelCase )
# save in new folder
processor.save_pretrained(__lowerCAmelCase )
# drop `processor_class` in feature extractor
with open(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , """r""" ) as f:
lowercase = json.load(__lowerCAmelCase )
config_dict.pop("""processor_class""" )
with open(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , """w""" ) as f:
f.write(json.dumps(__lowerCAmelCase ) )
lowercase = AutoProcessor.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase = WavaVecaConfig(processor_class="""Wav2Vec2Processor""" )
model_config.save_pretrained(__lowerCAmelCase )
# copy relevant files
copyfile(__lowerCAmelCase , os.path.join(__lowerCAmelCase , """vocab.json""" ) )
# create emtpy sample processor
with open(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , """w""" ) as f:
f.write("""{}""" )
lowercase = AutoProcessor.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
with self.assertRaises(__lowerCAmelCase ):
lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowerCAmelCase ):
lowercase = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__lowerCAmelCase )
lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__lowerCAmelCase )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
lowercase = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
lowercase = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
lowercase = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__lowerCAmelCase , use_fast=__lowerCAmelCase )
lowercase = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def A__ ( self ):
"""simple docstring"""
try:
AutoConfig.register("""custom""" , __lowerCAmelCase )
AutoFeatureExtractor.register(__lowerCAmelCase , __lowerCAmelCase )
AutoTokenizer.register(__lowerCAmelCase , slow_tokenizer_class=__lowerCAmelCase )
AutoProcessor.register(__lowerCAmelCase , __lowerCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCAmelCase ):
AutoProcessor.register(__lowerCAmelCase , __lowerCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
lowercase = CustomFeatureExtractor.from_pretrained(__lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase = os.path.join(__lowerCAmelCase , """vocab.txt""" )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
lowercase = CustomTokenizer(__lowerCAmelCase )
lowercase = CustomProcessor(__lowerCAmelCase , __lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(__lowerCAmelCase )
lowercase = AutoProcessor.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A__ ( self ):
"""simple docstring"""
class _A ( lowerCAmelCase ):
snake_case__ : Optional[Any] = False
class _A ( lowerCAmelCase ):
snake_case__ : int = False
class _A ( lowerCAmelCase ):
snake_case__ : List[Any] = 'AutoFeatureExtractor'
snake_case__ : List[str] = 'AutoTokenizer'
snake_case__ : List[str] = False
try:
AutoConfig.register("""custom""" , __lowerCAmelCase )
AutoFeatureExtractor.register(__lowerCAmelCase , __lowerCAmelCase )
AutoTokenizer.register(__lowerCAmelCase , slow_tokenizer_class=__lowerCAmelCase )
AutoProcessor.register(__lowerCAmelCase , __lowerCAmelCase )
# If remote code is not set, the default is to use local classes.
lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
lowercase = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__lowerCAmelCase )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
lowercase = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__lowerCAmelCase )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A__ ( self ):
"""simple docstring"""
lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" )
def A__ ( self ):
"""simple docstring"""
lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" )
self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" )
@is_staging_test
class _A ( unittest.TestCase ):
snake_case__ : Dict = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
@classmethod
def A__ ( cls ):
"""simple docstring"""
lowercase = TOKEN
HfFolder.save_token(__lowerCAmelCase )
@classmethod
def A__ ( cls ):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id="""test-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" )
except HTTPError:
pass
def A__ ( self ):
"""simple docstring"""
lowercase = WavaVecaProcessor.from_pretrained(__lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__lowerCAmelCase , """test-processor""" ) , push_to_hub=__lowerCAmelCase , use_auth_token=self._token )
lowercase = WavaVecaProcessor.from_pretrained(f'{USER}/test-processor' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__lowerCAmelCase , getattr(new_processor.feature_extractor , __lowerCAmelCase ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def A__ ( self ):
"""simple docstring"""
lowercase = WavaVecaProcessor.from_pretrained(__lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__lowerCAmelCase , """test-processor-org""" ) , push_to_hub=__lowerCAmelCase , use_auth_token=self._token , organization="""valid_org""" , )
lowercase = WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__lowerCAmelCase , getattr(new_processor.feature_extractor , __lowerCAmelCase ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def A__ ( self ):
"""simple docstring"""
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
lowercase = CustomFeatureExtractor.from_pretrained(__lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase = os.path.join(__lowerCAmelCase , """vocab.txt""" )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
lowercase = CustomTokenizer(__lowerCAmelCase )
lowercase = CustomProcessor(__lowerCAmelCase , __lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f'{USER}/test-dynamic-processor' , token=self._token )
lowercase = Repository(__lowerCAmelCase , clone_from=f'{USER}/test-dynamic-processor' , token=self._token )
processor.save_pretrained(__lowerCAmelCase )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""",
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(__lowerCAmelCase , """tokenizer_config.json""" ) ) as f:
lowercase = json.load(__lowerCAmelCase )
self.assertDictEqual(
tokenizer_config["""auto_map"""] , {
"""AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None],
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(__lowerCAmelCase , """custom_feature_extraction.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(__lowerCAmelCase , """custom_tokenization.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(__lowerCAmelCase , """custom_processing.py""" ) ) )
repo.push_to_hub()
lowercase = AutoProcessor.from_pretrained(f'{USER}/test-dynamic-processor' , trust_remote_code=__lowerCAmelCase )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , """CustomProcessor""" )
| 359
| 1
|
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__SCREAMING_SNAKE_CASE : Optional[Any] = 'python tqdm regex requests packaging filelock numpy tokenizers'.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('dataclasses')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('importlib_metadata')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
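# Usage sketch (added for illustration): callers elsewhere in the library can
# guard a code path on a pinned dependency, e.g.
#
#   dep_version_check("tqdm")  # raises if the installed tqdm violates the pin in `deps`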
| 2
|
"""simple docstring"""
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__(self, keywords: list[str]) -> None:
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        """Return a dict mapping each found keyword to the list of its start offsets."""
        result: dict = {}
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
            for key in self.adlist[current_state]["output"]:
                if key not in result:
                    result[key] = []
                result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
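    # Usage sketch (added for illustration): values are 0-based start offsets of
    # each keyword found in the string.
    auto = Automaton(["he", "she", "hers"])
    print(auto.search_in("ushers"))  # {'she': [1], 'he': [2], 'hers': [2]}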
| 2
| 1
|
'''simple docstring'''
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 578
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
| 562
| 0
|
'''simple docstring'''
# fmt: off
MORSE_CODE_DICT = {
"A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
"H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
"O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
"V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
"2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
"8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.",
"?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
"(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
| 424
|
'''simple docstring'''
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    """
    Output class for Semantic Stable Diffusion pipelines: the generated images and
    per-image flags indicating whether NSFW content was detected.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 424
| 1
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """
    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    def __post_init__(self):
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
"You must specify either a dataset name from the hub or a train and/or validation directory." )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """
    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            task="image-classification",
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder",
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            task="image-classification",
        )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        """Computes accuracy on a batch of predictions."""
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="image-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ]
    )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ]
    )

    def train_transforms(example_batch):
        """Apply _train_transforms across a batch."""
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch

    def val_transforms(example_batch):
        """Apply _val_transforms across a batch."""
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
        return example_batch
    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)

    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"] if training_args.do_train else None,
        eval_dataset=dataset["validation"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
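# Example invocation (added for illustration; flags map to the dataclasses above
# and to HF TrainingArguments; the dataset name and output path are placeholders):
#
#   python run_image_classification.py \
#       --dataset_name beans \
#       --model_name_or_path google/vit-base-patch16-224-in21k \
#       --output_dir ./vit-beans \
#       --do_train --do_eval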
| 418
|
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class CustomLocalPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,), "This is a local test"

        return ImagePipelineOutput(images=image), "This is a local test"
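# Usage sketch (added for illustration; `unet` and `scheduler` stand for
# pre-built diffusers components, e.g. a UNet2DModel and a DDPM scheduler):
#
#   pipe = CustomLocalPipeline(unet=unet, scheduler=scheduler)
#   output, message = pipe(num_inference_steps=10)
#   images = output.images  # message == "This is a local test"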
| 559
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_unispeech'] = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 242
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_lowercase = logging.get_logger(__name__)
_lowercase = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
_lowercase = {
'tokenizer_file': {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
},
}
_lowercase = {
'gpt-neox-20b': 2_048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Builds the input ids for a conversation."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
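# Usage sketch (added for illustration; fetches tokenizer files from the Hub):
#
#   tok = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#   tok("Hello world")["input_ids"]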
| 242
| 1
|
"""simple docstring"""
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
Note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
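A one-off invocation of the generator above can also be pointed at a scratch directory; the path below is illustrative, not part of the original script:

```python
# Hypothetical standalone use of write_model_card(); the output path is illustrative.
from pathlib import Path

out_dir = Path("/tmp/model_cards/facebook/wmt19-en-de")
write_model_card(out_dir, src_lang="en", tgt_lang="de")
print((out_dir / "README.md").exists())  # True
```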
| 103
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0):
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consists of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[Any]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indices(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indices(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indices(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: "Matrix") -> "Matrix":
        # Validate
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> "Matrix":
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: "Matrix") -> "Matrix":
        return self + (-another)

    def __mul__(self, another) -> "Matrix":
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> "Matrix":
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: "Matrix", v: "Matrix"):
        # Validate
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vectors
        assert u.column == v.column == 1  # u, v should be column vectors
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # it's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
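For reference, `sherman_morrison` implements the standard rank-one inverse update: with the instance holding A⁻¹, it returns the inverse of A + uvᵀ. In LaTeX form:

```latex
% Sherman-Morrison identity, as implemented by sherman_morrison() above
(A + u v^{\top})^{-1} = A^{-1} - \frac{A^{-1} u \, v^{\top} A^{-1}}{1 + v^{\top} A^{-1} u},
\qquad \text{valid whenever } 1 + v^{\top} A^{-1} u \neq 0 .
```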
# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
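A minimal numeric sanity check of that update, reusing the `test1` setup above (the instance is the identity matrix, which is its own inverse); this is a sketch, not part of the original tests:

```python
# Verifies (A + u v^T) * sherman_morrison(u, v) ≈ I for A = I using the
# Matrix class above; a sketch, not part of the original test suite.
def check_sherman_morrison() -> None:
    ainv = Matrix(3, 3, 0)
    for i in range(3):
        ainv[i, i] = 1  # A = I, so A^(-1) = I as well
    u = Matrix(3, 1, 0)
    u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
    v = Matrix(3, 1, 0)
    v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
    inv = ainv.sherman_morrison(u, v)
    product = (ainv + u * v.transpose()) * inv  # should be (close to) I
    for r in range(3):
        for c in range(3):
            expected = 1.0 if r == c else 0.0
            assert abs(product[r, c] - expected) < 1e-9
```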
| 530
| 0
|
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    """simple docstring"""

    def __init__(self, parent, batch_size=2, seq_length=8, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=16, num_hidden_layers=5, num_attention_heads=2, intermediate_size=36, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.01_40, 0.08_30, -0.03_81], [0.15_46, 0.14_02, 0.02_20], [0.11_62, 0.08_51, 0.01_65]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[9.25_95, -3.60_38, 11.88_19], [9.38_69, -3.26_93, 11.09_56], [11.85_24, -3.49_38, 13.12_10]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.47_89, -2.35_64, 7.50_64], [7.90_67, -1.33_69, 9.96_68], [9.07_12, -1.81_06, 7.03_80]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
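The slow tests above all reduce to the same inference pattern; a minimal sketch of it (checkpoint name and expected shape are taken directly from the tests):

```python
# Minimal sketch of the inference pattern the slow tests above exercise.
import torch
from transformers import MraModel

model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
input_ids = torch.arange(256).unsqueeze(0)  # shape (1, 256)
with torch.no_grad():
    last_hidden_state = model(input_ids)[0]
print(last_hidden_state.shape)  # torch.Size([1, 256, 768])
```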
| 708
|
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''' , [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 , dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'''train''': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")])
def test_split_dict_asdict_has_dataset_name(split_info: SplitInfo):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
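For context, a small round-trip sketch of the private helpers these tests exercise; values mirror the parametrized cases above:

```python
# Round-trip sketch for SplitDict; mirrors the first test above.
from datasets.splits import SplitDict, SplitInfo

splits = SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)})
yaml_list = splits._to_yaml_list()            # plain dicts, one per split
reloaded = SplitDict._from_yaml_list(yaml_list)
assert list(reloaded) == ["train"]
```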
| 528
| 0
|