# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
import random


def _partition(data, pivot):
    """Three-way partition of `data` around `pivot`."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items, index):
    """Return the `index`-th smallest element of `items` (0-based)."""
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    less, equal, greater = _partition(items, pivot)
    count = len(equal)
    m = len(less)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(less, index)
    # must be in larger
    else:
        return quick_select(greater, index - (m + count))
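
# Example (illustrative): quick_select finds the index-th smallest element in
# expected O(n) time without fully sorting the input.
if __name__ == "__main__":
    sample = [2, 4, 5, 7, 899, 54, 32]
    print(quick_select(sample, len(sample) // 2))  # median of an odd-length list -> 7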
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
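
# Example (illustrative): because of the DummyObject metaclass, touching this
# placeholder without the backend installed fails immediately, e.g.
#
#     MidiProcessor()  # raises ImportError asking you to `pip install note_seq`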
from dataclasses import asdict, dataclass
from typing import Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}


class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
    return (
        "<cls>",
        "<pad>",
        "<eos>",
        "<unk>",
        "L",
        "A",
        "G",
        "V",
        "S",
        "E",
        "R",
        "T",
        "I",
        "D",
        "P",
        "K",
        "Q",
        "N",
        "F",
        "Y",
        "M",
        "H",
        "W",
        "C",
        "X",
        "B",
        "U",
        "Z",
        "O",
        ".",
        "-",
        "<null_1>",
        "<mask>",
    )
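
# Example (illustrative; the argument values below are arbitrary):
#
#     config = EsmConfig(vocab_size=33, hidden_size=320, num_hidden_layers=6, num_attention_heads=20)
#     config.to_dict()["hidden_size"]  # 320
#
# Passing is_folding_model=True without an esmfold_config builds a default
# EsmFoldConfig, whose __post_init__ fills in TrunkConfig and
# StructureModuleConfig recursively.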
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile

import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False

try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )

default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object_name in f.readlines():
            vg_classes.append(object_name.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object_name in f.readlines():
            vg_attrs.append(object_name.split(",")[0].lower().strip())
    return vg_classes, vg_attrs


def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)

        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.split(".")[-1]] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f"{t * (self._level - 1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
        self._level = level
        return r[:-1]
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError

            config_file = Config.load_yaml(resolved_config_file)

        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return Config.load_yaml(resolved_config_file), kwargs
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False]) / len(n1.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")


# Hugging face functions below


def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id: str, filename: str, use_cdn=True) -> str:
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
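
# Example (illustrative): legacy ids (no "/") hyphenate the filename, while
# namespaced ids use a path component.
#
#     hf_bucket_url("bert-base-uncased", "config.yaml", use_cdn=True)
#     # -> "https://cdn.huggingface.co/bert-base-uncased-config.yaml"
#     hf_bucket_url("unc-nlp/frcnn-vg-finetuned", "config.yaml", use_cdn=False)
#     # -> "https://s3.amazonaws.com/models.huggingface.co/bert/unc-nlp/frcnn-vg-finetuned/config.yaml"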
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s",
                url,
                temp_file.name,
            )

            http_get(
                url,
                temp_file,
                proxies=proxies,
                resume_size=resume_size,
                user_agent=user_agent,
            )

        os.replace(temp_file.name, cache_path)

        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)

    return cache_path
def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()

    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()

    if url.endswith(".h5"):
        filename += ".h5"

    return filename
def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))

    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path

        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)

        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted

        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))

        return output_path_extracted

    return output_path
def get_data(query, delim=","):
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
            assert data is not None, "could not connect"
            try:
                data = eval(data)
            except Exception:
                data = data.split("\n")
        req.close()
    return data
def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img


def load_frcnn_pkl_from_url(url):
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new


def get_demo_path():
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")


def img_tensorize(im, input_format="RGB"):
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img


def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
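
# Example (illustrative sketch): Config wraps a nested dict with attribute
# access, and cached_path resolves a remote file to a local cached copy.
#
#     cfg = Config({"model": {"hidden_size": 768}})
#     cfg.model.hidden_size                                               # 768
#     local = cached_path("https://cdn.huggingface.co/some-config.yaml")  # hypothetical URL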
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "pytorch",
            "script": "run_ddp.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf_dist.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
        },
    ]
)
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"

        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
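
# Example (illustrative): with the _LazyModule registration above,
# `from transformers.models.funnel import FunnelModel` defers importing
# modeling_funnel (and therefore torch) until the attribute is first accessed.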
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
    },
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}
class T5TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = T5Tokenizer

    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self.extra_ids = extra_ids
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
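
# Example (illustrative): T5 appends </s> to every segment and uses no BOS
# token, so for token_ids_0=[8774, 32] and token_ids_1=[571],
# build_inputs_with_special_tokens returns
# [8774, 32, eos_token_id, 571, eos_token_id].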
from functools import lru_cache


@lru_cache
def factorial(num: int) -> int:
    """
    >>> factorial(7)
    5040
    """
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device


torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
import unittest
from dataclasses import dataclass

import pytest

from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict


@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]


class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def A ( _lowercase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = analyze_text(_lowercase )
SCREAMING_SNAKE_CASE : Any = list(''' ''' + ascii_lowercase )
# what is our total sum of probabilities.
SCREAMING_SNAKE_CASE : Tuple = sum(single_char_strings.values() )
# one length string
SCREAMING_SNAKE_CASE : Tuple = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
SCREAMING_SNAKE_CASE : Tuple = single_char_strings[ch]
SCREAMING_SNAKE_CASE : List[str] = my_str / all_sum
my_fir_sum += prob * math.loga(_lowercase ) # entropy formula.
# print entropy
print(f"""{round(-1 * my_fir_sum ):.1f}""" )
# two len string
SCREAMING_SNAKE_CASE : Optional[Any] = sum(two_char_strings.values() )
SCREAMING_SNAKE_CASE : List[str] = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
for cha in my_alphas:
SCREAMING_SNAKE_CASE : Union[str, Any] = cha + cha
if sequence in two_char_strings:
SCREAMING_SNAKE_CASE : Any = two_char_strings[sequence]
SCREAMING_SNAKE_CASE : Dict = int(_lowercase ) / all_sum
my_sec_sum += prob * math.loga(_lowercase )
# print second entropy
print(f"""{round(-1 * my_sec_sum ):.1f}""" )
# print the difference between them
print(f"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Tuple = Counter() # type: ignore
SCREAMING_SNAKE_CASE : Any = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(_lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def A ( ):
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 34
| 0
|
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class lowercase__ :
@staticmethod
def __A ( *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : Dict ):
'''simple docstring'''
pass
def A ( _lowercase ):
    SCREAMING_SNAKE_CASE : str = hashlib.md5(image.tobytes() )
return m.hexdigest()[:10]
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Tuple = np.array(_lowercase )
SCREAMING_SNAKE_CASE : Any = npimg.shape
return {"hash": hashimage(_lowercase ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class lowercase__ ( unittest.TestCase):
UpperCamelCase_ = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else []))
UpperCamelCase_ = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []))
def __A ( self : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = MaskGenerationPipeline(model=UpperCamelCase__ , image_processor=UpperCamelCase__ )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def __A ( self : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
pass
@require_tf
@unittest.skip('''Image segmentation not implemented in TF''' )
def __A ( self : int ):
'''simple docstring'''
pass
@slow
@require_torch
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = pipeline('''mask-generation''' , model='''facebook/sam-vit-huge''' )
SCREAMING_SNAKE_CASE : List[str] = image_segmenter('''http://images.cocodataset.org/val2017/000000039769.jpg''' , points_per_batch=256 )
# Shortening by hashing
SCREAMING_SNAKE_CASE : int = []
for i, o in enumerate(outputs['''masks'''] ):
            new_output += [{"mask": mask_to_test_readable(UpperCamelCase__ ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.021},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0053},
{'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (480, 640)}, '''scores''': 0.9967},
{'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (480, 640)}, '''scores''': 0.993},
{'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (480, 640)}, '''scores''': 0.9909},
{'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (480, 640)}, '''scores''': 0.9879},
{'''mask''': {'''hash''': '''801064ff79''', '''shape''': (480, 640)}, '''scores''': 0.9834},
{'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (480, 640)}, '''scores''': 0.9716},
{'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (480, 640)}, '''scores''': 0.9612},
{'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (480, 640)}, '''scores''': 0.9599},
{'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (480, 640)}, '''scores''': 0.9552},
{'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (480, 640)}, '''scores''': 0.9532},
{'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (480, 640)}, '''scores''': 0.9516},
{'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (480, 640)}, '''scores''': 0.9499},
{'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (480, 640)}, '''scores''': 0.9483},
{'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (480, 640)}, '''scores''': 0.9464},
{'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (480, 640)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (480, 640)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''c749b25868''', '''shape''': (480, 640)}, '''scores''': 0.9408},
{'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (480, 640)}, '''scores''': 0.9335},
{'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (480, 640)}, '''scores''': 0.9326},
{'''mask''': {'''hash''': '''788b798e24''', '''shape''': (480, 640)}, '''scores''': 0.9262},
{'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (480, 640)}, '''scores''': 0.8999},
{'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (480, 640)}, '''scores''': 0.8986},
{'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (480, 640)}, '''scores''': 0.8984},
{'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (480, 640)}, '''scores''': 0.8873},
{'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (480, 640)}, '''scores''': 0.8871}
] , )
# fmt: on
@require_torch
@slow
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = '''facebook/sam-vit-huge'''
SCREAMING_SNAKE_CASE : Tuple = pipeline('''mask-generation''' , model=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = image_segmenter(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
SCREAMING_SNAKE_CASE : Optional[Any] = []
for i, o in enumerate(outputs['''masks'''] ):
            new_output += [{"mask": mask_to_test_readable(UpperCamelCase__ ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0210},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0053},
] , )
| 718
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__UpperCamelCase : Tuple = {
'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'],
'tokenization_ctrl': ['CTRLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = [
'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'CTRLForSequenceClassification',
'CTRLLMHeadModel',
'CTRLModel',
'CTRLPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCTRLForSequenceClassification',
'TFCTRLLMHeadModel',
'TFCTRLModel',
'TFCTRLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
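# A minimal sketch of the deferred-import pattern behind `_LazyModule` above. This
# is NOT transformers' actual implementation -- `_DemoLazyModule` is a simplified
# stand-in showing why importing the package never pulls in torch or TensorFlow:
# the heavy submodule is only imported when one of its attributes is touched.
import importlib
import types

class _DemoLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute back to the submodule that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._attr_to_module[attr]  # a real implementation would raise AttributeError here
        submodule = importlib.import_module("." + module_name, self.__name__)
        return getattr(submodule, attr)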
| 34
| 0
|
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class lowercase__ ( unittest.TestCase):
def __A ( self : Optional[int] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
SCREAMING_SNAKE_CASE : Any = FlaxDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=UpperCamelCase__ , cache_dir=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = [t[-1] for t in os.walk(os.path.join(UpperCamelCase__ , os.listdir(UpperCamelCase__ )[0] , '''snapshots''' ) )]
SCREAMING_SNAKE_CASE : List[Any] = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('''.bin''' ) for f in files )
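# `shard` from flax reshapes the leading batch axis into (num_devices, per_device)
# so each device receives one slice under `jit=True`. A tiny numpy mimic of that
# layout (illustrative only, not flax's implementation):
def _demo_shard(batch: np.ndarray, num_devices: int) -> np.ndarray:
    assert batch.shape[0] % num_devices == 0, "batch must divide evenly across devices"
    return batch.reshape(num_devices, batch.shape[0] // num_devices, *batch.shape[1:])
# e.g. _demo_shard(np.zeros((8, 77)), 8).shape == (8, 1, 77), which matches the
# (num_samples, 1, ...) image shapes asserted in the tests below.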
@slow
@require_flax
class lowercase__ ( unittest.TestCase):
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
SCREAMING_SNAKE_CASE : Any = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE : Optional[int] = 4
SCREAMING_SNAKE_CASE : Optional[int] = jax.device_count()
SCREAMING_SNAKE_CASE : Dict = num_samples * [prompt]
SCREAMING_SNAKE_CASE : Any = pipeline.prepare_inputs(UpperCamelCase__ )
# shard inputs and rng
SCREAMING_SNAKE_CASE : List[Any] = replicate(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = jax.random.split(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = shard(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = pipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , jit=UpperCamelCase__ ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.151_4745 ) < 1E-3
assert np.abs(np.abs(UpperCamelCase__ , dtype=np.floataa ).sum() - 4_9947.875 ) < 5E-1
SCREAMING_SNAKE_CASE : str = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(UpperCamelCase__ ) == num_samples
def __A ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''flax''' , safety_checker=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
SCREAMING_SNAKE_CASE : List[str] = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE : Optional[int] = 50
SCREAMING_SNAKE_CASE : str = jax.device_count()
SCREAMING_SNAKE_CASE : Union[str, Any] = num_samples * [prompt]
SCREAMING_SNAKE_CASE : Optional[Any] = pipeline.prepare_inputs(UpperCamelCase__ )
# shard inputs and rng
SCREAMING_SNAKE_CASE : Optional[Any] = replicate(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = jax.random.split(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = shard(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = pipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , jit=UpperCamelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0565_2401) ) < 1E-3
assert np.abs((np.abs(UpperCamelCase__ , dtype=np.floataa ).sum() - 238_3808.2) ) < 5E-1
def __A ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
SCREAMING_SNAKE_CASE : List[str] = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = 50
SCREAMING_SNAKE_CASE : Optional[int] = jax.device_count()
SCREAMING_SNAKE_CASE : int = num_samples * [prompt]
SCREAMING_SNAKE_CASE : List[Any] = pipeline.prepare_inputs(UpperCamelCase__ )
# shard inputs and rng
SCREAMING_SNAKE_CASE : str = replicate(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = jax.random.split(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = shard(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = pipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , jit=UpperCamelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0400_3906) ) < 1E-3
assert np.abs((np.abs(UpperCamelCase__ , dtype=np.floataa ).sum() - 237_3516.75) ) < 5E-1
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE : Dict = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
SCREAMING_SNAKE_CASE : Optional[Any] = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = 50
SCREAMING_SNAKE_CASE : Optional[int] = jax.device_count()
SCREAMING_SNAKE_CASE : Union[str, Any] = num_samples * [prompt]
SCREAMING_SNAKE_CASE : str = pipeline.prepare_inputs(UpperCamelCase__ )
# shard inputs and rng
SCREAMING_SNAKE_CASE : List[str] = replicate(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = jax.random.split(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = shard(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = pipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , jit=UpperCamelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0400_3906) ) < 1E-3
assert np.abs((np.abs(UpperCamelCase__ , dtype=np.floataa ).sum() - 237_3516.75) ) < 5E-1
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = FlaxDDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , set_alpha_to_one=UpperCamelCase__ , steps_offset=1 , )
SCREAMING_SNAKE_CASE : Any = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Any = scheduler.create_state()
SCREAMING_SNAKE_CASE : Tuple = scheduler_state
SCREAMING_SNAKE_CASE : Union[str, Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
SCREAMING_SNAKE_CASE : Optional[Any] = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE : str = 50
SCREAMING_SNAKE_CASE : Tuple = jax.device_count()
SCREAMING_SNAKE_CASE : Optional[Any] = num_samples * [prompt]
SCREAMING_SNAKE_CASE : str = pipeline.prepare_inputs(UpperCamelCase__ )
# shard inputs and rng
SCREAMING_SNAKE_CASE : Any = replicate(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = jax.random.split(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = shard(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = pipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , jit=UpperCamelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4504_3945) ) < 1E-3
assert np.abs((np.abs(UpperCamelCase__ , dtype=np.floataa ).sum() - 234_7693.5) ) < 5E-1
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
SCREAMING_SNAKE_CASE : int = jax.device_count()
SCREAMING_SNAKE_CASE : Any = num_samples * [prompt]
SCREAMING_SNAKE_CASE : List[str] = jax.random.split(jax.random.PRNGKey(0 ) , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : str = replicate(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = pipeline.prepare_inputs(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = shard(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = pipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , jit=UpperCamelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Any = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
SCREAMING_SNAKE_CASE : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=UpperCamelCase__ , use_memory_efficient_attention=UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : str = replicate(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = pipeline.prepare_inputs(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = shard(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = pipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , jit=UpperCamelCase__ ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        SCREAMING_SNAKE_CASE : str = images_eff[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
| 719
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCamelCase : Tuple = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = ['MaskFormerFeatureExtractor']
__UpperCamelCase : List[Any] = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
__UpperCamelCase : Union[str, Any] = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 34
| 0
|
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class lowercase__ :
UpperCamelCase_ = 42 # [batch_size x 3]
UpperCamelCase_ = 42 # [batch_size x 3]
UpperCamelCase_ = 42 # [batch_size x 3]
UpperCamelCase_ = 42 # [batch_size x 3]
UpperCamelCase_ = 42
UpperCamelCase_ = 42
UpperCamelCase_ = 42
UpperCamelCase_ = 42
UpperCamelCase_ = 42
def __A ( self : List[Any] ):
'''simple docstring'''
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def __A ( self : str ):
'''simple docstring'''
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def __A ( self : List[Any] ):
'''simple docstring'''
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = torch.arange(self.height * self.width )
SCREAMING_SNAKE_CASE : List[str] = torch.stack(
[
pixel_indices % self.width,
torch.div(UpperCamelCase__ , self.width , rounding_mode='''trunc''' ),
] , axis=1 , )
return coords
@property
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.shape
SCREAMING_SNAKE_CASE : List[str] = int(np.prod(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_image_coords()
SCREAMING_SNAKE_CASE : List[str] = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
SCREAMING_SNAKE_CASE : List[str] = self.get_camera_rays(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = rays.view(UpperCamelCase__ , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def __A ( self : Dict , UpperCamelCase__ : torch.Tensor ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = coords.view(UpperCamelCase__ , -1 , 2 )
SCREAMING_SNAKE_CASE : Any = self.resolution()
SCREAMING_SNAKE_CASE : str = self.fov()
SCREAMING_SNAKE_CASE : str = (flat.float() / (res - 1)) * 2 - 1
SCREAMING_SNAKE_CASE : List[str] = fracs * torch.tan(fov / 2 )
SCREAMING_SNAKE_CASE : int = fracs.view(UpperCamelCase__ , -1 , 2 )
SCREAMING_SNAKE_CASE : Optional[Any] = (
self.z.view(UpperCamelCase__ , 1 , 3 )
+ self.x.view(UpperCamelCase__ , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(UpperCamelCase__ , 1 , 3 ) * fracs[:, :, 1:]
)
SCREAMING_SNAKE_CASE : Tuple = directions / directions.norm(dim=-1 , keepdim=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.stack(
[
torch.broadcast_to(self.origin.view(UpperCamelCase__ , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(UpperCamelCase__ , *UpperCamelCase__ , 2 , 3 )
def __A ( self : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : int ):
'''simple docstring'''
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=UpperCamelCase__ , height=UpperCamelCase__ , x_fov=self.x_fov , y_fov=self.y_fov , )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Tuple = []
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : Tuple = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
SCREAMING_SNAKE_CASE : int = np.array([np.sin(_lowercase ), np.cos(_lowercase ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
SCREAMING_SNAKE_CASE : Tuple = -z * 4
SCREAMING_SNAKE_CASE : Optional[int] = np.array([np.cos(_lowercase ), -np.sin(_lowercase ), 0.0] )
SCREAMING_SNAKE_CASE : Tuple = np.cross(_lowercase , _lowercase )
origins.append(_lowercase )
xs.append(_lowercase )
ys.append(_lowercase )
zs.append(_lowercase )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(_lowercase , axis=0 ) ).float() , x=torch.from_numpy(np.stack(_lowercase , axis=0 ) ).float() , y=torch.from_numpy(np.stack(_lowercase , axis=0 ) ).float() , z=torch.from_numpy(np.stack(_lowercase , axis=0 ) ).float() , width=_lowercase , height=_lowercase , x_fov=0.7 , y_fov=0.7 , shape=(1, len(_lowercase )) , )
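# A scalar-sized sketch of the ray-direction arithmetic in `get_camera_rays` above:
# a pixel's fractional coordinates are scaled by tan(fov / 2), combined with the
# camera basis vectors, and the result is normalized. All inputs here are arbitrary
# demo values, not outputs of the class.
def _demo_ray_direction() -> torch.Tensor:
    x_axis = torch.tensor([1.0, 0.0, 0.0])
    y_axis = torch.tensor([0.0, 1.0, 0.0])
    z_axis = torch.tensor([0.0, 0.0, 1.0])  # viewing direction
    fov = torch.tensor(0.7)
    fracs = torch.tensor([0.25, -0.5]) * torch.tan(fov / 2)
    direction = z_axis + x_axis * fracs[0] + y_axis * fracs[1]
    return direction / direction.norm()  # unit-length ray direction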
| 720
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
__UpperCamelCase : Dict = [
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def A ( _lowercase , _lowercase=None , _lowercase=None , _lowercase=None ):
SCREAMING_SNAKE_CASE : Union[str, Any] = True
while ask_again:
SCREAMING_SNAKE_CASE : Optional[Any] = input(_lowercase )
try:
if default is not None and len(_lowercase ) == 0:
return default
return convert_value(_lowercase ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(_lowercase )
def A ( _lowercase , _lowercase=[] , _lowercase=None , _lowercase=0 ):
SCREAMING_SNAKE_CASE : Dict = BulletMenu(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE : str = menu.run(default_choice=_lowercase )
return convert_value(_lowercase ) if convert_value is not None else result
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Dict = int(_lowercase )
return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Any = int(_lowercase )
return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = int(_lowercase )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = int(_lowercase )
return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Dict = int(_lowercase )
return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] )
def A ( _lowercase ):
return {"yes": True, "no": False}[value.lower()]
class lowercase__ ( argparse.RawDescriptionHelpFormatter):
def __A ( self : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = super()._format_usage(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = usage.replace('''<command> [<args>] ''' , '''''' )
return usage
| 34
| 0
|
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = DebertaTokenizer
UpperCamelCase_ = True
UpperCamelCase_ = DebertaTokenizerFast
def __A ( self : List[Any] ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE : Any = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
SCREAMING_SNAKE_CASE : str = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
SCREAMING_SNAKE_CASE : List[str] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
SCREAMING_SNAKE_CASE : Optional[Any] = {'''unk_token''': '''[UNK]'''}
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCamelCase__ ) )
def __A ( self : Any , **UpperCamelCase__ : str ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self : Tuple , UpperCamelCase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = '''lower newer'''
SCREAMING_SNAKE_CASE : int = '''lower newer'''
return input_text, output_text
def __A ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Dict = '''lower newer'''
SCREAMING_SNAKE_CASE : str = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
SCREAMING_SNAKE_CASE : List[str] = tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE : Optional[Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , UpperCamelCase__ )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer('''Hello''' , '''World''' )
SCREAMING_SNAKE_CASE : Dict = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['''token_type_ids'''] , UpperCamelCase__ )
@slow
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = tokenizer.encode(
'''sequence builders''' , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def __A ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
SCREAMING_SNAKE_CASE : str = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = [tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ ) for seq in encoding['''input_ids''']]
# fmt: off
SCREAMING_SNAKE_CASE : List[str] = {
'''input_ids''': [
[1, 2118, 1_1126, 565, 35, 83, 2_5191, 163, 1_8854, 13, 1_2156, 12, 1_6101, 2_5376, 1_3807, 9, 2_2205, 2_7893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 1_1126, 565, 2_4536, 80, 4_3797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 3_3183, 1_1303, 4_3797, 1938, 4, 870, 2_4165, 2_9105, 5, 739, 3_2644, 3_3183, 1_1303, 3_6173, 88, 80, 650, 7821, 4_5940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 1_3171, 31, 5, 1836, 9, 3_2644, 3_3183, 1_1303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
SCREAMING_SNAKE_CASE : str = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
self.assertDictEqual(encoding.data , UpperCamelCase__ )
for expected, decoded in zip(UpperCamelCase__ , UpperCamelCase__ ):
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
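# A minimal sketch of how the toy vocab/merges written in `setUp` drive byte-pair
# encoding: a word starts as characters and adjacent pairs are merged according to
# the merge table, so "lower" becomes ["l", "o", "w", "er"]. This single-pass demo
# is illustrative only, not the tokenizer's actual BPE loop.
def _demo_bpe(word, merges=(("e", "r"),)):
    symbols = list(word)
    for left, right in merges:
        merged, i = [], 0
        while i < len(symbols):
            if i + 1 < len(symbols) and (symbols[i], symbols[i + 1]) == (left, right):
                merged.append(left + right)
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = merged
    return symbols
# _demo_bpe("lower") -> ['l', 'o', 'w', 'er']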
| 721
|
from __future__ import annotations
from typing import Any
class lowercase__ ( UpperCamelCase_):
pass
class lowercase__ :
def __init__( self : Union[str, Any] , UpperCamelCase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = data
SCREAMING_SNAKE_CASE : Node | None = None
def __iter__( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self
SCREAMING_SNAKE_CASE : Tuple = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(UpperCamelCase__ )
yield node.data
SCREAMING_SNAKE_CASE : Dict = node.next_node
@property
def __A ( self : Optional[int] ):
'''simple docstring'''
try:
list(self )
return False
except ContainsLoopError:
return True
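# The `has_loop` property above records every visited node, costing O(n) extra
# memory. Floyd's tortoise-and-hare technique detects the same condition in O(1)
# space. A standalone sketch (not part of the original module), assuming nodes
# expose a `next_node` attribute as in the class above:
def _has_loop_floyd(head: Node | None) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node  # advances one step
        fast = fast.next_node.next_node  # advances two steps
        if slow is fast:  # the hare lapped the tortoise: a cycle exists
            return True
    return False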
if __name__ == "__main__":
__UpperCamelCase : List[Any] = Node(1)
__UpperCamelCase : str = Node(2)
__UpperCamelCase : Dict = Node(3)
__UpperCamelCase : List[Any] = Node(4)
print(root_node.has_loop) # False
__UpperCamelCase : int = root_node.next_node
print(root_node.has_loop) # True
__UpperCamelCase : Union[str, Any] = Node(5)
__UpperCamelCase : Union[str, Any] = Node(6)
__UpperCamelCase : List[Any] = Node(5)
__UpperCamelCase : List[str] = Node(6)
print(root_node.has_loop) # False
__UpperCamelCase : List[Any] = Node(1)
print(root_node.has_loop) # False
| 34
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = ["""pixel_values"""]
def __init__( self : Any , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PIL.Image.BICUBIC , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = size if size is not None else {'''height''': 256, '''width''': 256}
SCREAMING_SNAKE_CASE : Dict = get_size_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
SCREAMING_SNAKE_CASE : Optional[int] = get_size_dict(UpperCamelCase__ , param_name='''crop_size''' )
SCREAMING_SNAKE_CASE : str = do_resize
SCREAMING_SNAKE_CASE : int = size
SCREAMING_SNAKE_CASE : Dict = resample
SCREAMING_SNAKE_CASE : str = do_center_crop
SCREAMING_SNAKE_CASE : Dict = crop_size
SCREAMING_SNAKE_CASE : Optional[Any] = do_rescale
SCREAMING_SNAKE_CASE : List[str] = rescale_factor
SCREAMING_SNAKE_CASE : Tuple = do_normalize
SCREAMING_SNAKE_CASE : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __A ( self : Optional[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : PILImageResampling = PIL.Image.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : int , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = get_size_dict(UpperCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return resize(
UpperCamelCase__ , size=(size['''height'''], size['''width''']) , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : List[str] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : int , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = get_size_dict(UpperCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(UpperCamelCase__ , size=(size['''height'''], size['''width''']) , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : str , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Tuple , ):
'''simple docstring'''
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : List[Any] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : bool = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : int=None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : float = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase__ : Tuple , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE : int = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE : str = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE : int = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE : int = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE : List[Any] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE : Any = size if size is not None else self.size
SCREAMING_SNAKE_CASE : Tuple = get_size_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE : Dict = get_size_dict(UpperCamelCase__ , param_name='''crop_size''' )
SCREAMING_SNAKE_CASE : List[str] = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE : Dict = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE : Optional[Any] = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE : Optional[int] = [self.center_crop(image=UpperCamelCase__ , size=UpperCamelCase__ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE : int = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE : Tuple = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images]
SCREAMING_SNAKE_CASE : Optional[Any] = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
SCREAMING_SNAKE_CASE : int = {'''pixel_values''': images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
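# A tiny self-contained sketch of the per-image rescale -> normalize arithmetic the
# `preprocess` method above applies (the real helpers live in
# transformers.image_transforms; the scale/mean/std here are hypothetical):
def _demo_rescale_normalize(image: np.ndarray, scale: float = 1 / 255, mean: float = 0.5, std: float = 0.5) -> np.ndarray:
    rescaled = image.astype(np.float32) * scale  # e.g. uint8 [0, 255] -> [0.0, 1.0]
    return (rescaled - mean) / std  # roughly [-1.0, 1.0] after normalization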
| 700
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = ["""input_features""", """is_longer"""]
def __init__( self : Optional[Any] , UpperCamelCase__ : Dict=64 , UpperCamelCase__ : Optional[Any]=4_8000 , UpperCamelCase__ : Tuple=480 , UpperCamelCase__ : Union[str, Any]=10 , UpperCamelCase__ : List[Any]=1024 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : int=False , UpperCamelCase__ : float = 0 , UpperCamelCase__ : float = 1_4000 , UpperCamelCase__ : int = None , UpperCamelCase__ : str = "fusion" , UpperCamelCase__ : str = "repeatpad" , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
feature_size=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , padding_value=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = top_db
SCREAMING_SNAKE_CASE : Union[str, Any] = truncation
SCREAMING_SNAKE_CASE : str = padding
SCREAMING_SNAKE_CASE : List[Any] = fft_window_size
SCREAMING_SNAKE_CASE : Tuple = (fft_window_size >> 1) + 1
SCREAMING_SNAKE_CASE : List[str] = hop_length
SCREAMING_SNAKE_CASE : List[Any] = max_length_s
SCREAMING_SNAKE_CASE : Tuple = max_length_s * sampling_rate
SCREAMING_SNAKE_CASE : List[Any] = sampling_rate
SCREAMING_SNAKE_CASE : List[str] = frequency_min
SCREAMING_SNAKE_CASE : Any = frequency_max
SCREAMING_SNAKE_CASE : List[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase__ , min_frequency=UpperCamelCase__ , max_frequency=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , norm=UpperCamelCase__ , mel_scale='''htk''' , )
SCREAMING_SNAKE_CASE : Optional[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase__ , min_frequency=UpperCamelCase__ , max_frequency=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , norm='''slaney''' , mel_scale='''slaney''' , )
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __A ( self : Optional[int] , UpperCamelCase__ : np.array , UpperCamelCase__ : Optional[np.array] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = spectrogram(
UpperCamelCase__ , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase__ , log_mel='''dB''' , )
return log_mel_spectrogram.T
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE : int = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE : Any = [0]
# randomly choose index for each part
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.choice(ranges[0] )
SCREAMING_SNAKE_CASE : List[Any] = np.random.choice(ranges[1] )
SCREAMING_SNAKE_CASE : int = np.random.choice(ranges[2] )
SCREAMING_SNAKE_CASE : Optional[int] = mel[idx_front : idx_front + chunk_frames, :]
SCREAMING_SNAKE_CASE : Optional[Any] = mel[idx_middle : idx_middle + chunk_frames, :]
SCREAMING_SNAKE_CASE : Tuple = mel[idx_back : idx_back + chunk_frames, :]
SCREAMING_SNAKE_CASE : str = torch.tensor(mel[None, None, :] )
SCREAMING_SNAKE_CASE : Optional[int] = torch.nn.functional.interpolate(
UpperCamelCase__ , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = mel_shrink[0][0].numpy()
SCREAMING_SNAKE_CASE : Union[str, Any] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def __A ( self : Dict , UpperCamelCase__ : np.array , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
SCREAMING_SNAKE_CASE : Optional[Any] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ ) - max_length
SCREAMING_SNAKE_CASE : Dict = np.random.randint(0 , overflow + 1 )
SCREAMING_SNAKE_CASE : Optional[Any] = waveform[idx : idx + max_length]
SCREAMING_SNAKE_CASE : Any = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
SCREAMING_SNAKE_CASE : Any = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters )
SCREAMING_SNAKE_CASE : List[str] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
SCREAMING_SNAKE_CASE : List[Any] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
SCREAMING_SNAKE_CASE : Optional[Any] = np.stack([mel, mel, mel, mel] , axis=0 )
SCREAMING_SNAKE_CASE : Tuple = False
else:
SCREAMING_SNAKE_CASE : str = self._random_mel_fusion(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = True
else:
raise NotImplementedError(f"""data_truncating {truncation} not implemented""" )
else:
SCREAMING_SNAKE_CASE : List[str] = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
SCREAMING_SNAKE_CASE : Tuple = int(max_length / len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Any = np.stack(np.tile(UpperCamelCase__ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
SCREAMING_SNAKE_CASE : List[Any] = int(max_length / len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Dict = np.stack(np.tile(UpperCamelCase__ , UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Dict = np.pad(UpperCamelCase__ , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
if truncation == "fusion":
SCREAMING_SNAKE_CASE : List[Any] = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
SCREAMING_SNAKE_CASE : List[str] = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any] , UpperCamelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase__ : str = None , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , **UpperCamelCase__ : Any , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = truncation if truncation is not None else self.truncation
SCREAMING_SNAKE_CASE : List[str] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
SCREAMING_SNAKE_CASE : List[str] = isinstance(UpperCamelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
SCREAMING_SNAKE_CASE : int = is_batched_numpy or (
isinstance(UpperCamelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE : Any = [np.asarray(UpperCamelCase__ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCamelCase__ , np.ndarray ):
SCREAMING_SNAKE_CASE : List[Any] = np.asarray(UpperCamelCase__ , dtype=np.floataa )
elif isinstance(UpperCamelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE : Union[str, Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE : List[str] = [np.asarray(UpperCamelCase__ )]
# convert to mel spectrogram, truncate and pad if needed.
SCREAMING_SNAKE_CASE : int = [
self._get_input_mel(UpperCamelCase__ , max_length if max_length else self.nb_max_samples , UpperCamelCase__ , UpperCamelCase__ )
for waveform in raw_speech
]
SCREAMING_SNAKE_CASE : str = []
SCREAMING_SNAKE_CASE : List[str] = []
for mel, longer in padded_inputs:
input_mel.append(UpperCamelCase__ )
is_longer.append(UpperCamelCase__ )
if truncation == "fusion" and sum(UpperCamelCase__ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randint(0 , len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Optional[Any] = True
if isinstance(input_mel[0] , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(UpperCamelCase__ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
SCREAMING_SNAKE_CASE : Optional[Any] = [[longer] for longer in is_longer]
SCREAMING_SNAKE_CASE : Union[str, Any] = {'''input_features''': input_mel, '''is_longer''': is_longer}
SCREAMING_SNAKE_CASE : int = BatchFeature(UpperCamelCase__ )
if return_tensors is not None:
SCREAMING_SNAKE_CASE : int = input_features.convert_to_tensors(UpperCamelCase__ )
return input_features
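# `_random_mel_fusion` above splits the valid chunk start positions into three
# equal ranges (front / middle / back) before sampling one start from each. A
# minimal numpy illustration of that range split, with hypothetical frame counts:
def _demo_chunk_ranges(total_frames: int = 10, chunk_frames: int = 4):
    starts = list(range(0, total_frames - chunk_frames + 1))  # every valid chunk start
    return np.array_split(starts, 3)
# _demo_chunk_ranges() -> [array([0, 1, 2]), array([3, 4]), array([5, 6])]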
| 34
| 0
|
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class lowercase__ ( UpperCamelCase_):
def __init__( self : Union[str, Any] , UpperCamelCase__ : int = 101 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = length
def __len__( self : Dict ):
'''simple docstring'''
return self.length
def __getitem__( self : int , UpperCamelCase__ : str ):
'''simple docstring'''
return i
class lowercase__ :
def __call__( self : Dict , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return {"input_ids": torch.tensor(UpperCamelCase__ ), "labels": torch.tensor(UpperCamelCase__ )}
class lowercase__ ( nn.Module):
def __init__( self : List[str] ):
'''simple docstring'''
super().__init__()
        # Add some (unused) params, otherwise DDP will complain.
SCREAMING_SNAKE_CASE : Dict = nn.Linear(120 , 80 )
def __A ( self : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any=None ):
'''simple docstring'''
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device ), input_ids
else:
return input_ids
class lowercase__ ( UpperCamelCase_):
@require_torch_neuroncore
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = f"""--nproc_per_node=2
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
""".split()
SCREAMING_SNAKE_CASE : Tuple = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Optional[int] = f"""--output_dir {output_dir}""".split()
SCREAMING_SNAKE_CASE : Optional[int] = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(UpperCamelCase__ , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class lowercase__ ( UpperCamelCase_):
@require_torch_multi_gpu
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = f"""--nproc_per_node={torch.cuda.device_count()}
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
""".split()
SCREAMING_SNAKE_CASE : Any = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : List[Any] = f"""--output_dir {output_dir}""".split()
SCREAMING_SNAKE_CASE : str = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(UpperCamelCase__ , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
__UpperCamelCase : Any = HfArgumentParser((TrainingArguments,))
__UpperCamelCase : Tuple = parser.parse_args_into_dataclasses()[0]
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
f"""distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"""
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order (this is crucial for prediction, for instance).
for dataset_length in [101, 40, 7]:
__UpperCamelCase : List[str] = DummyDataset(dataset_length)
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Optional[int] = list(range(len(_lowercase ) ) )
SCREAMING_SNAKE_CASE : List[str] = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'''Predictions and/or labels do not match expected results:\n - predictions: '''
f"""{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}""" )
return {"success": success}
__UpperCamelCase : Any = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
__UpperCamelCase : str = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
__UpperCamelCase : Tuple = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
__UpperCamelCase : Any = 2
__UpperCamelCase : Any = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
__UpperCamelCase : Optional[int] = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
__UpperCamelCase : List[str] = None
| 701
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : str = {
'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json',
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """layoutlmv3"""
def __init__( self : Optional[int] , UpperCamelCase__ : Union[str, Any]=5_0265 , UpperCamelCase__ : List[Any]=768 , UpperCamelCase__ : Union[str, Any]=12 , UpperCamelCase__ : Union[str, Any]=12 , UpperCamelCase__ : Tuple=3072 , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Any=512 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : List[Any]=0.02 , UpperCamelCase__ : Optional[Any]=1E-5 , UpperCamelCase__ : List[str]=1 , UpperCamelCase__ : int=0 , UpperCamelCase__ : str=2 , UpperCamelCase__ : List[str]=1024 , UpperCamelCase__ : str=128 , UpperCamelCase__ : str=128 , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Optional[int]=32 , UpperCamelCase__ : Any=128 , UpperCamelCase__ : Optional[Any]=64 , UpperCamelCase__ : Dict=256 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Dict=224 , UpperCamelCase__ : Optional[Any]=3 , UpperCamelCase__ : Any=16 , UpperCamelCase__ : Any=None , **UpperCamelCase__ : Optional[Any] , ):
'''simple docstring'''
super().__init__(
vocab_size=UpperCamelCase__ , hidden_size=UpperCamelCase__ , num_hidden_layers=UpperCamelCase__ , num_attention_heads=UpperCamelCase__ , intermediate_size=UpperCamelCase__ , hidden_act=UpperCamelCase__ , hidden_dropout_prob=UpperCamelCase__ , attention_probs_dropout_prob=UpperCamelCase__ , max_position_embeddings=UpperCamelCase__ , type_vocab_size=UpperCamelCase__ , initializer_range=UpperCamelCase__ , layer_norm_eps=UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = max_ad_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = coordinate_size
SCREAMING_SNAKE_CASE : List[str] = shape_size
SCREAMING_SNAKE_CASE : Optional[int] = has_relative_attention_bias
SCREAMING_SNAKE_CASE : List[Any] = rel_pos_bins
SCREAMING_SNAKE_CASE : str = max_rel_pos
SCREAMING_SNAKE_CASE : Any = has_spatial_attention_bias
SCREAMING_SNAKE_CASE : Union[str, Any] = rel_ad_pos_bins
SCREAMING_SNAKE_CASE : Union[str, Any] = max_rel_ad_pos
SCREAMING_SNAKE_CASE : Union[str, Any] = text_embed
SCREAMING_SNAKE_CASE : List[str] = visual_embed
SCREAMING_SNAKE_CASE : Optional[Any] = input_size
SCREAMING_SNAKE_CASE : Optional[Any] = num_channels
SCREAMING_SNAKE_CASE : List[Any] = patch_size
SCREAMING_SNAKE_CASE : List[Any] = classifier_dropout
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = version.parse("""1.12""")
@property
def __A ( self : str ):
'''simple docstring'''
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
def __A ( self : int ):
'''simple docstring'''
return 1E-5
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return 12
def __A ( self : Optional[Any] , UpperCamelCase__ : "ProcessorMixin" , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional["TensorType"] = None , UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : int = 40 , ):
'''simple docstring'''
setattr(processor.image_processor , '''apply_ocr''' , UpperCamelCase__ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : Any = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : List[Any] = processor.tokenizer.num_special_tokens_to_add(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE : Union[str, Any] = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
SCREAMING_SNAKE_CASE : Any = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_images(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = dict(
processor(
UpperCamelCase__ , text=UpperCamelCase__ , boxes=UpperCamelCase__ , return_tensors=UpperCamelCase__ , ) )
return inputs
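# Minimal standalone sketch (an assumption: it mirrors the behaviour of
# `compute_effective_axis_dimension` as used above) showing how a dynamic
# axis (-1) is replaced by a fixed default before special tokens are
# subtracted from the sequence budget.
if __name__ == "__main__":
    def effective_dim(dimension, fixed_dimension, num_token_to_add=0):
        if dimension <= 0:
            dimension = fixed_dimension
        return dimension - num_token_to_add

    print(effective_dim(-1, fixed_dimension=2))                      # 2 (dynamic batch axis)
    print(effective_dim(-1, fixed_dimension=8, num_token_to_add=2))  # 6 (dynamic sequence axis)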
| 34
| 0
|
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(UpperCamelCase_)
class lowercase__ ( UpperCamelCase_):
def __init__( self : Union[str, Any] , **UpperCamelCase__ : int ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Tuple , UpperCamelCase__ : Union[str, List[str], "Image", List["Image"]] , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return super().__call__(UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : List[Any] , **UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = {}
if "candidate_labels" in kwargs:
SCREAMING_SNAKE_CASE : Dict = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
SCREAMING_SNAKE_CASE : int = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def __A ( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Any="This is a photo of {}." ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = load_image(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = self.image_processor(images=[image] , return_tensors=self.framework )
SCREAMING_SNAKE_CASE : Optional[Any] = candidate_labels
SCREAMING_SNAKE_CASE : List[Any] = [hypothesis_template.format(UpperCamelCase__ ) for x in candidate_labels]
SCREAMING_SNAKE_CASE : Dict = self.tokenizer(UpperCamelCase__ , return_tensors=self.framework , padding=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = [text_inputs]
return inputs
def __A ( self : Optional[Any] , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = model_inputs.pop('''candidate_labels''' )
SCREAMING_SNAKE_CASE : Optional[int] = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = text_inputs[0]
else:
# Batching case.
SCREAMING_SNAKE_CASE : int = text_inputs[0][0]
SCREAMING_SNAKE_CASE : Any = self.model(**UpperCamelCase__ , **UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def __A ( self : Dict , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = model_outputs.pop('''candidate_labels''' )
SCREAMING_SNAKE_CASE : str = model_outputs['''logits'''][0]
if self.framework == "pt":
SCREAMING_SNAKE_CASE : Optional[int] = logits.softmax(dim=-1 ).squeeze(-1 )
SCREAMING_SNAKE_CASE : List[str] = probs.tolist()
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : str = [scores]
elif self.framework == "tf":
SCREAMING_SNAKE_CASE : Optional[int] = stable_softmax(UpperCamelCase__ , axis=-1 )
SCREAMING_SNAKE_CASE : List[Any] = probs.numpy().tolist()
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
SCREAMING_SNAKE_CASE : Any = [
{'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(UpperCamelCase__ , UpperCamelCase__ ) , key=lambda x : -x[0] )
]
return result
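# Illustrative usage sketch (not part of the original file; the checkpoint
# name is an assumption, any CLIP-like model works): the class above backs
# the "zero-shot-image-classification" pipeline task.
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline(
        "zero-shot-image-classification", model="openai/clip-vit-base-patch32"
    )
    outputs = classifier(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["two cats", "a dog", "an airplane"],
    )
    print(outputs)  # list of {"score": ..., "label": ...} sorted by score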
| 702
|
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = FunnelTokenizer
UpperCamelCase_ = FunnelTokenizerFast
UpperCamelCase_ = True
UpperCamelCase_ = True
def __A ( self : Union[str, Any] ):
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE : Optional[Any] = [
'''<unk>''',
'''<cls>''',
'''<sep>''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __A ( self : int , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return FunnelTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self : int , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self : Any , UpperCamelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = '''UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE : int = '''unwanted, running'''
return input_text, output_text
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE : int = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(UpperCamelCase__ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [7, 4, 5, 10, 8, 9] )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_tokenizers(do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
SCREAMING_SNAKE_CASE : int = tokenizer('''UNwant\u00E9d,running''' )
SCREAMING_SNAKE_CASE : Optional[Any] = len(inputs['''input_ids'''] ) - 1
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer('''UNwant\u00E9d,running''' , '''UNwant\u00E9d,running''' )
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len + [1] * sentence_len )
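# Usage sketch (not part of the test file; the checkpoint name is an
# assumption): Funnel's convention checked above is that the <cls> token
# carries token_type_id 2, the first sequence 0 and the second sequence 1.
if __name__ == "__main__":
    from transformers import FunnelTokenizer

    tok = FunnelTokenizer.from_pretrained("funnel-transformer/small")
    print(tok("hello world")["token_type_ids"])  # starts with 2, then 0s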
| 34
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : List[str] = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """xmod"""
def __init__( self : Optional[Any] , UpperCamelCase__ : List[Any]=3_0522 , UpperCamelCase__ : Dict=768 , UpperCamelCase__ : Tuple=12 , UpperCamelCase__ : List[Any]=12 , UpperCamelCase__ : Union[str, Any]=3072 , UpperCamelCase__ : Union[str, Any]="gelu" , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : List[str]=512 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : List[Any]=0.02 , UpperCamelCase__ : List[Any]=1E-12 , UpperCamelCase__ : Optional[int]=1 , UpperCamelCase__ : Tuple=0 , UpperCamelCase__ : Any=2 , UpperCamelCase__ : int="absolute" , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Any=False , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Dict=False , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Optional[int]=("en_XX",) , UpperCamelCase__ : Tuple=None , **UpperCamelCase__ : List[Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = vocab_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : str = num_hidden_layers
SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE : int = hidden_act
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : Tuple = position_embedding_type
SCREAMING_SNAKE_CASE : List[str] = use_cache
SCREAMING_SNAKE_CASE : Optional[int] = classifier_dropout
SCREAMING_SNAKE_CASE : Tuple = pre_norm
SCREAMING_SNAKE_CASE : List[Any] = adapter_reduction_factor
SCREAMING_SNAKE_CASE : List[Any] = adapter_layer_norm
SCREAMING_SNAKE_CASE : Tuple = adapter_reuse_layer_norm
SCREAMING_SNAKE_CASE : Dict = ln_before_adapter
SCREAMING_SNAKE_CASE : Union[str, Any] = list(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = default_language
class lowercase__ ( UpperCamelCase_):
@property
def __A ( self : int ):
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : List[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE : Any = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
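# Usage sketch (not part of the original file): X-MOD configs carry the
# tuple of adapter languages plus a default language used when none is set
# explicitly on the model.
if __name__ == "__main__":
    from transformers import XmodConfig

    config = XmodConfig(languages=("en_XX", "de_DE"), default_language="en_XX")
    print(config.languages, config.default_language)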
| 703
|
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class lowercase__ ( UpperCamelCase_):
def __init__( self : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = dataset
SCREAMING_SNAKE_CASE : Optional[Any] = process
SCREAMING_SNAKE_CASE : Union[str, Any] = params
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : List[str] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.dataset[i]
SCREAMING_SNAKE_CASE : Optional[int] = self.process(UpperCamelCase__ , **self.params )
return processed
class lowercase__ ( UpperCamelCase_):
def __init__( self : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any]=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = loader
SCREAMING_SNAKE_CASE : List[Any] = infer
SCREAMING_SNAKE_CASE : int = params
if loader_batch_size == 1:
        # Let's spare some time by deactivating loader-batch unrolling altogether
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : List[str] = loader_batch_size
# Internal bookkeeping
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : int = None
def __len__( self : int ):
'''simple docstring'''
return len(self.loader )
def __iter__( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = iter(self.loader )
return self
def __A ( self : List[str] ):
'''simple docstring'''
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
SCREAMING_SNAKE_CASE : Optional[Any] = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
for k, element in self._loader_batch_data.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
# Convert ModelOutput to tuple first
SCREAMING_SNAKE_CASE : Dict = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE : Dict = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                # Those are stored as lists of tensors, so they need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
SCREAMING_SNAKE_CASE : List[str] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
SCREAMING_SNAKE_CASE : int = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
SCREAMING_SNAKE_CASE : Union[str, Any] = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
SCREAMING_SNAKE_CASE : Tuple = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
SCREAMING_SNAKE_CASE : Tuple = element[self._loader_batch_index]
        # Recreate the element by reusing the original class to make it look like
# batch_size=1
SCREAMING_SNAKE_CASE : Any = self._loader_batch_data.__class__(UpperCamelCase__ )
self._loader_batch_index += 1
return result
def __A ( self : Union[str, Any] ):
'''simple docstring'''
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
SCREAMING_SNAKE_CASE : Tuple = next(self.iterator )
SCREAMING_SNAKE_CASE : List[Any] = self.infer(UpperCamelCase__ , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(UpperCamelCase__ , torch.Tensor ):
SCREAMING_SNAKE_CASE : Optional[int] = processed
else:
SCREAMING_SNAKE_CASE : int = list(processed.keys() )[0]
SCREAMING_SNAKE_CASE : int = processed[key]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : Dict = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
SCREAMING_SNAKE_CASE : List[Any] = observed_batch_size
# Setting internal index to unwrap the batch
SCREAMING_SNAKE_CASE : List[Any] = processed
SCREAMING_SNAKE_CASE : int = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class lowercase__ ( UpperCamelCase_):
def __init__( self : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any]=None ):
'''simple docstring'''
super().__init__(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __iter__( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = iter(self.loader )
SCREAMING_SNAKE_CASE : List[Any] = None
return self
def __A ( self : List[str] ):
'''simple docstring'''
if self.subiterator is None:
SCREAMING_SNAKE_CASE : Dict = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
SCREAMING_SNAKE_CASE : Any = next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item.
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and have been iterated over.
            #
            # Another way to look at it: we're basically flattening lists of lists
            # into a single list, but with generators.
SCREAMING_SNAKE_CASE : Optional[Any] = self.infer(next(self.iterator ) , **self.params )
SCREAMING_SNAKE_CASE : Union[str, Any] = next(self.subiterator )
return processed
class lowercase__ ( UpperCamelCase_):
def __iter__( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = iter(self.loader )
return self
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Optional[int] = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
SCREAMING_SNAKE_CASE : Tuple = self.loader_batch_item()
SCREAMING_SNAKE_CASE : Any = item.pop('''is_last''' )
accumulator.append(UpperCamelCase__ )
if is_last:
return accumulator
while not is_last:
SCREAMING_SNAKE_CASE : Any = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(UpperCamelCase__ , torch.Tensor ):
SCREAMING_SNAKE_CASE : Tuple = processed
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = list(processed.keys() )[0]
SCREAMING_SNAKE_CASE : List[str] = processed[key]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : int = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
SCREAMING_SNAKE_CASE : List[str] = observed_batch_size
SCREAMING_SNAKE_CASE : List[Any] = processed
SCREAMING_SNAKE_CASE : str = 0
while self._loader_batch_index < self.loader_batch_size:
SCREAMING_SNAKE_CASE : Any = self.loader_batch_item()
SCREAMING_SNAKE_CASE : List[Any] = item.pop('''is_last''' )
accumulator.append(UpperCamelCase__ )
if is_last:
return accumulator
else:
SCREAMING_SNAKE_CASE : int = processed
SCREAMING_SNAKE_CASE : List[str] = item.pop('''is_last''' )
accumulator.append(UpperCamelCase__ )
return accumulator
class lowercase__ ( UpperCamelCase_):
def __init__( self : Optional[Any] , UpperCamelCase__ : Dataset , UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = dataset
SCREAMING_SNAKE_CASE : Dict = key
def __len__( self : Optional[int] ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : Dict , UpperCamelCase__ : Tuple ):
'''simple docstring'''
return self.dataset[i][self.key]
class lowercase__ ( UpperCamelCase_):
def __init__( self : List[Any] , UpperCamelCase__ : Dataset , UpperCamelCase__ : str , UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = dataset
SCREAMING_SNAKE_CASE : List[str] = keya
SCREAMING_SNAKE_CASE : Tuple = keya
def __len__( self : List[str] ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : Union[str, Any] , UpperCamelCase__ : Any ):
'''simple docstring'''
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 34
| 0
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = (UniPCMultistepScheduler,)
UpperCamelCase_ = (("""num_inference_steps""", 25),)
def __A ( self : Any , **UpperCamelCase__ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''solver_type''': '''bh2''',
}
config.update(**UpperCamelCase__ )
return config
def __A ( self : Union[str, Any] , UpperCamelCase__ : Any=0 , **UpperCamelCase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs.pop('''num_inference_steps''' , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = self.dummy_sample
SCREAMING_SNAKE_CASE : Any = 0.1 * sample
SCREAMING_SNAKE_CASE : str = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE : str = self.get_scheduler_config(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE : int = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class.from_pretrained(UpperCamelCase__ )
new_scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE : Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order]
SCREAMING_SNAKE_CASE : Optional[Any] = sample, sample
for t in range(UpperCamelCase__ , time_step + scheduler.config.solver_order + 1 ):
SCREAMING_SNAKE_CASE : List[str] = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
SCREAMING_SNAKE_CASE : Any = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __A ( self : Optional[Any] , UpperCamelCase__ : List[str]=0 , **UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE : Tuple = kwargs.pop('''num_inference_steps''' , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_sample
SCREAMING_SNAKE_CASE : str = 0.1 * sample
SCREAMING_SNAKE_CASE : Dict = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE : List[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : int = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
SCREAMING_SNAKE_CASE : Dict = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = scheduler_class.from_pretrained(UpperCamelCase__ )
                new_scheduler.set_timesteps(UpperCamelCase__ )
                # copy over dummy past residuals (must be after setting timesteps)
SCREAMING_SNAKE_CASE : Union[str, Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
SCREAMING_SNAKE_CASE : Optional[int] = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
SCREAMING_SNAKE_CASE : str = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[Any]=None , **UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
if scheduler is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = scheduler_class(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : int = self.get_scheduler_config(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = 10
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_model()
SCREAMING_SNAKE_CASE : int = self.dummy_sample_deter
scheduler.set_timesteps(UpperCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE : Optional[int] = model(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
return sample
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE : List[str] = kwargs.pop('''num_inference_steps''' , UpperCamelCase__ )
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE : List[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample
SCREAMING_SNAKE_CASE : int = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCamelCase__ , '''set_timesteps''' ):
scheduler.set_timesteps(UpperCamelCase__ )
elif num_inference_steps is not None and not hasattr(UpperCamelCase__ , '''set_timesteps''' ):
SCREAMING_SNAKE_CASE : List[str] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
SCREAMING_SNAKE_CASE : Any = [residual + 0.2, residual + 0.15, residual + 0.10]
SCREAMING_SNAKE_CASE : int = dummy_past_residuals[: scheduler.config.solver_order]
SCREAMING_SNAKE_CASE : List[str] = scheduler.timesteps[5]
SCREAMING_SNAKE_CASE : Dict = scheduler.timesteps[6]
SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
SCREAMING_SNAKE_CASE : List[str] = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = UniPCMultistepScheduler(**self.get_scheduler_config() )
SCREAMING_SNAKE_CASE : Optional[Any] = self.full_loop(scheduler=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_mean.item() - 0.2464 ) < 1E-3
SCREAMING_SNAKE_CASE : Dict = DPMSolverSinglestepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE : Any = DEISMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE : Optional[int] = DPMSolverMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE : Any = UniPCMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE : str = self.full_loop(scheduler=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_mean.item() - 0.2464 ) < 1E-3
def __A ( self : List[str] ):
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ )
def __A ( self : Any ):
'''simple docstring'''
self.check_over_configs(thresholding=UpperCamelCase__ )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=UpperCamelCase__ , prediction_type=UpperCamelCase__ , sample_max_value=UpperCamelCase__ , solver_order=UpperCamelCase__ , solver_type=UpperCamelCase__ , )
def __A ( self : Optional[Any] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase__ )
def __A ( self : Dict ):
'''simple docstring'''
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=UpperCamelCase__ , solver_type=UpperCamelCase__ , prediction_type=UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : str = self.full_loop(
solver_order=UpperCamelCase__ , solver_type=UpperCamelCase__ , prediction_type=UpperCamelCase__ , )
assert not torch.isnan(UpperCamelCase__ ).any(), "Samples have nan numbers"
def __A ( self : List[Any] ):
'''simple docstring'''
self.check_over_configs(lower_order_final=UpperCamelCase__ )
self.check_over_configs(lower_order_final=UpperCamelCase__ )
def __A ( self : Optional[Any] ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=UpperCamelCase__ , time_step=0 )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.full_loop()
SCREAMING_SNAKE_CASE : List[Any] = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_mean.item() - 0.2464 ) < 1E-3
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.full_loop(prediction_type='''v_prediction''' )
SCREAMING_SNAKE_CASE : List[str] = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_mean.item() - 0.1014 ) < 1E-3
def __A ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config(thresholding=UpperCamelCase__ , dynamic_thresholding_ratio=0 )
SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = 10
SCREAMING_SNAKE_CASE : List[str] = self.dummy_model()
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_sample_deter.half()
scheduler.set_timesteps(UpperCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE : List[str] = model(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
assert sample.dtype == torch.floataa
def __A ( self : int , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE : Tuple = self.get_scheduler_config(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
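# Usage sketch (not part of the test file) of the scheduler interchange the
# tests above rely on: these multistep schedulers share a config format, so
# one can be rebuilt from another's config.
if __name__ == "__main__":
    from diffusers import DPMSolverMultistepScheduler, UniPCMultistepScheduler

    unipc = UniPCMultistepScheduler(num_train_timesteps=1000)
    dpm = DPMSolverMultistepScheduler.from_config(unipc.config)
    roundtrip = UniPCMultistepScheduler.from_config(dpm.config)
    print(type(dpm).__name__, type(roundtrip).__name__)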
| 704
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : Optional[Any] = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """deberta-v2"""
def __init__( self : Optional[Any] , UpperCamelCase__ : Any=12_8100 , UpperCamelCase__ : Optional[int]=1536 , UpperCamelCase__ : Dict=24 , UpperCamelCase__ : List[str]=24 , UpperCamelCase__ : Tuple=6144 , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Optional[int]=512 , UpperCamelCase__ : Optional[Any]=0 , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : List[Any]=1E-7 , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : str=-1 , UpperCamelCase__ : List[Any]=0 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Tuple=0 , UpperCamelCase__ : str="gelu" , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : int = num_hidden_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Any = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : Dict = type_vocab_size
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Optional[Any] = relative_attention
SCREAMING_SNAKE_CASE : Optional[Any] = max_relative_positions
SCREAMING_SNAKE_CASE : Optional[int] = pad_token_id
SCREAMING_SNAKE_CASE : Optional[Any] = position_biased_input
# Backwards compatibility
        if isinstance(UpperCamelCase__ , str ):
SCREAMING_SNAKE_CASE : Optional[int] = [x.strip() for x in pos_att_type.lower().split('''|''' )]
SCREAMING_SNAKE_CASE : Any = pos_att_type
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE : Tuple = kwargs.get('''pooler_hidden_size''' , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = pooler_dropout
SCREAMING_SNAKE_CASE : Union[str, Any] = pooler_hidden_act
class lowercase__ ( UpperCamelCase_):
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Optional[int] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis)] )
else:
return OrderedDict([('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis)] )
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return 12
def __A ( self : Dict , UpperCamelCase__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional["TensorType"] = None , UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : "PreTrainedTokenizerBase" = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = super().generate_dummy_inputs(preprocessor=UpperCamelCase__ , framework=UpperCamelCase__ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
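# Minimal sketch (not part of the original file) of the backwards-compatible
# parsing above: `pos_att_type` may arrive as a pipe-separated string and is
# normalised to a lower-cased list.
if __name__ == "__main__":
    pos_att_type = "P2C|C2P"
    if isinstance(pos_att_type, str):
        pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]
    print(pos_att_type)  # ['p2c', 'c2p']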
| 34
| 0
|
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
def A ( _lowercase=None , _lowercase=None ):
return field(default_factory=lambda: default , metadata=_lowercase )
@dataclass
class lowercase__ :
UpperCamelCase_ = list_field(
default=[] , metadata={
"""help""": (
"""Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"""
""" of all available models"""
)
} , )
UpperCamelCase_ = list_field(
default=[8] , metadata={"""help""": """List of batch sizes for which memory and time performance will be evaluated"""})
UpperCamelCase_ = list_field(
default=[8, 32, 128, 512] , metadata={"""help""": """List of sequence lengths for which memory and time performance will be evaluated"""} , )
UpperCamelCase_ = field(
default=UpperCamelCase_ , metadata={"""help""": """Whether to benchmark inference of model. Inference can be disabled via --no-inference."""} , )
UpperCamelCase_ = field(
default=UpperCamelCase_ , metadata={"""help""": """Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."""} , )
UpperCamelCase_ = field(
default=UpperCamelCase_ , metadata={"""help""": """Whether to run on available tpu devices. TPU can be disabled via --no-tpu."""})
UpperCamelCase_ = field(default=UpperCamelCase_ , metadata={"""help""": """Use FP16 to accelerate inference."""})
UpperCamelCase_ = field(default=UpperCamelCase_ , metadata={"""help""": """Benchmark training of model"""})
UpperCamelCase_ = field(default=UpperCamelCase_ , metadata={"""help""": """Verbose memory tracing"""})
UpperCamelCase_ = field(
default=UpperCamelCase_ , metadata={"""help""": """Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."""} , )
UpperCamelCase_ = field(
default=UpperCamelCase_ , metadata={
"""help""": """Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"""
} , )
UpperCamelCase_ = field(default=UpperCamelCase_ , metadata={"""help""": """Trace memory line by line"""})
UpperCamelCase_ = field(default=UpperCamelCase_ , metadata={"""help""": """Save result to a CSV file"""})
UpperCamelCase_ = field(default=UpperCamelCase_ , metadata={"""help""": """Save all print statements in a log file"""})
UpperCamelCase_ = field(default=UpperCamelCase_ , metadata={"""help""": """Whether to print environment information"""})
UpperCamelCase_ = field(
default=UpperCamelCase_ , metadata={
"""help""": (
"""Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"""
""" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"""
""" for debugging / testing and on TPU."""
)
} , )
UpperCamelCase_ = field(
default=f"inference_time_{round(time())}.csv" , metadata={"""help""": """CSV filename used if saving time results to csv."""} , )
UpperCamelCase_ = field(
default=f"inference_memory_{round(time())}.csv" , metadata={"""help""": """CSV filename used if saving memory results to csv."""} , )
UpperCamelCase_ = field(
default=f"train_time_{round(time())}.csv" , metadata={"""help""": """CSV filename used if saving time results to csv for training."""} , )
UpperCamelCase_ = field(
default=f"train_memory_{round(time())}.csv" , metadata={"""help""": """CSV filename used if saving memory results to csv for training."""} , )
UpperCamelCase_ = field(
default=f"env_info_{round(time())}.csv" , metadata={"""help""": """CSV filename used if saving environment information."""} , )
UpperCamelCase_ = field(
default=f"log_{round(time())}.csv" , metadata={"""help""": """Log filename used if print statements are saved in log."""} , )
UpperCamelCase_ = field(default=3 , metadata={"""help""": """Times an experiment will be run."""})
UpperCamelCase_ = field(
default=UpperCamelCase_ , metadata={
"""help""": (
"""Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"""
""" model weights."""
)
} , )
def __A ( self : Dict ):
'''simple docstring'''
warnings.warn(
f"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
''' are deprecated in general and it is advised to use external Benchmarking libraries '''
''' to benchmark Transformer models.''' , UpperCamelCase__ , )
def __A ( self : Any ):
'''simple docstring'''
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def __A ( self : List[str] ):
'''simple docstring'''
if len(self.models ) <= 0:
raise ValueError(
'''Please make sure you provide at least one model name / model identifier, *e.g.* `--models'''
''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' )
return self.models
@property
def __A ( self : List[Any] ):
'''simple docstring'''
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('''Multiprocessing is currently not possible on TPU.''' )
return False
else:
return True
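# Minimal sketch (not part of the original file) of why the `list_field`
# helper above exists: dataclass fields with mutable defaults must use a
# default factory, otherwise all instances would share one list.
if __name__ == "__main__":
    import dataclasses

    @dataclasses.dataclass
    class Demo:
        batch_sizes: list = dataclasses.field(default_factory=lambda: [8])

    print(Demo().batch_sizes)  # [8], a fresh list per instance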
| 705
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : str = logging.get_logger(__name__)
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Any = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE : Any = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE : Any = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type='''dataset''' ) , '''r''' ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = {int(_lowercase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Dict = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : str = '''std_conv''' if '''bit''' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
SCREAMING_SNAKE_CASE : Optional[int] = BitConfig(
conv_layer=_lowercase , num_labels=1_000 , idalabel=_lowercase , labelaid=_lowercase , )
return config
def A ( _lowercase ):
if "stem.conv" in name:
SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
if "blocks" in name:
SCREAMING_SNAKE_CASE : Tuple = name.replace('''blocks''' , '''layers''' )
if "head.fc" in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''head.fc''' , '''classifier.1''' )
if name.startswith('''norm''' ):
SCREAMING_SNAKE_CASE : str = '''bit.''' + name
if "bit" not in name and "classifier" not in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = '''bit.encoder.''' + name
return name
def A ( ):
SCREAMING_SNAKE_CASE : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE : List[str] = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return im
@torch.no_grad()
def A ( _lowercase , _lowercase , _lowercase=False ):
SCREAMING_SNAKE_CASE : List[Any] = get_config(_lowercase )
# load original model from timm
SCREAMING_SNAKE_CASE : Optional[Any] = create_model(_lowercase , pretrained=_lowercase )
timm_model.eval()
# load state_dict of original model
SCREAMING_SNAKE_CASE : Optional[int] = timm_model.state_dict()
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE : Dict = state_dict.pop(_lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = val.squeeze() if '''head''' in key else val
# load HuggingFace model
SCREAMING_SNAKE_CASE : str = BitForImageClassification(_lowercase )
model.eval()
model.load_state_dict(_lowercase )
# create image processor
SCREAMING_SNAKE_CASE : Optional[Any] = create_transform(**resolve_data_config({} , model=_lowercase ) )
SCREAMING_SNAKE_CASE : List[str] = transform.transforms
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
SCREAMING_SNAKE_CASE : Tuple = BitImageProcessor(
do_resize=_lowercase , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_lowercase , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=_lowercase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
SCREAMING_SNAKE_CASE : Any = prepare_img()
SCREAMING_SNAKE_CASE : Union[str, Any] = transform(_lowercase ).unsqueeze(0 )
SCREAMING_SNAKE_CASE : Optional[int] = processor(_lowercase , return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(_lowercase , _lowercase )
# verify logits
with torch.no_grad():
SCREAMING_SNAKE_CASE : Dict = model(_lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = outputs.logits
print('''Logits:''' , logits[0, :3] )
print('''Predicted class:''' , model.config.idalabel[logits.argmax(-1 ).item()] )
SCREAMING_SNAKE_CASE : List[Any] = timm_model(_lowercase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowercase , outputs.logits , atol=1e-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(_lowercase ).mkdir(exist_ok=_lowercase )
print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
processor.save_pretrained(_lowercase )
if push_to_hub:
print(f"""Pushing model {model_name} and processor to the hub""" )
model.push_to_hub(f"""ybelkada/{model_name}""" )
processor.push_to_hub(f"""ybelkada/{model_name}""" )
if __name__ == "__main__":
__UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
__UpperCamelCase : Optional[int] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
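# Standalone check (not part of the original script) of the timm -> HF key
# renaming scheme implemented above, re-stated here as a hypothetical helper
# so it can run on its own.
if __name__ == "__main__":
    def rename_key_demo(name):
        if "stem.conv" in name:
            name = name.replace("stem.conv", "bit.embedder.convolution")
        if "blocks" in name:
            name = name.replace("blocks", "layers")
        if "head.fc" in name:
            name = name.replace("head.fc", "classifier.1")
        if name.startswith("norm"):
            name = "bit." + name
        if "bit" not in name and "classifier" not in name:
            name = "bit.encoder." + name
        return name

    for key in ["stem.conv.weight", "head.fc.weight", "norm.weight"]:
        print(key, "->", rename_key_demo(key))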
| 34
| 0
|
import numpy as np
def A ( _lowercase ):
return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod()
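# Usage sketch (in addition to the doctest hook above): the function is the
# elementwise logistic sigmoid, 1 / (1 + e^-x), mapping any real input into
# the open interval (0, 1).
if __name__ == "__main__":
    print(A(np.array([-1.0, 0.0, 1.0])))  # approx. [0.2689 0.5 0.7311]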
| 706
|
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
__UpperCamelCase : str = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
__UpperCamelCase : int = logging.getLogger()
def A ( ):
SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
parser.add_argument('''-f''' )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
return args.f
def A ( _lowercase , _lowercase="eval" ):
SCREAMING_SNAKE_CASE : Dict = os.path.join(_lowercase , f"""{split}_results.json""" )
if os.path.exists(_lowercase ):
with open(_lowercase , '''r''' ) as f:
return json.load(_lowercase )
raise ValueError(f"""can't find {path}""" )
__UpperCamelCase : Optional[Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowercase__ ( UpperCamelCase_):
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Tuple = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_flax_glue.main()
SCREAMING_SNAKE_CASE : Union[str, Any] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : str = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_clm_flax.main()
SCREAMING_SNAKE_CASE : Dict = get_results(UpperCamelCase__ )
self.assertLess(result['''eval_perplexity'''] , 100 )
@slow
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Union[str, Any] = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_summarization_flax.main()
SCREAMING_SNAKE_CASE : Union[str, Any] = get_results(UpperCamelCase__ , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Dict = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_mlm_flax.main()
SCREAMING_SNAKE_CASE : List[Any] = get_results(UpperCamelCase__ )
self.assertLess(result['''eval_perplexity'''] , 42 )
@slow
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Union[str, Any] = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_ta_mlm_flax.main()
SCREAMING_SNAKE_CASE : Optional[int] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 7 if get_gpu_count() > 1 else 2
SCREAMING_SNAKE_CASE : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Any = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_flax_ner.main()
SCREAMING_SNAKE_CASE : List[str] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Union[str, Any] = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_qa.main()
SCREAMING_SNAKE_CASE : str = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_f1'''] , 30 )
self.assertGreaterEqual(result['''eval_exact'''] , 30 )
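# Hedged sketch of the sys.argv-patching pattern the tests above rely on: an
# argparse-based main() reads sys.argv, so swapping it out lets a test drive a
# whole script in-process. toy_main below is illustrative, not a real script.
from unittest.mock import patch as _patch

def toy_main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--learning_rate", type=float, required=True)
    return parser.parse_args().learning_rate  # parses sys.argv[1:]

testargs = "toy_script.py --learning_rate 2e-4".split()
with _patch.object(sys, "argv", testargs):
    assert toy_main() == 2e-4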
| 34
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
__UpperCamelCase : List[Any] = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__UpperCamelCase : Any = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
__UpperCamelCase : Tuple = {
'squeezebert/squeezebert-uncased': 512,
'squeezebert/squeezebert-mnli': 512,
'squeezebert/squeezebert-mnli-headless': 512,
}
__UpperCamelCase : int = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = SqueezeBertTokenizer
def __init__( self : str , UpperCamelCase__ : int=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : List[str]="[UNK]" , UpperCamelCase__ : int="[SEP]" , UpperCamelCase__ : Any="[PAD]" , UpperCamelCase__ : Union[str, Any]="[CLS]" , UpperCamelCase__ : List[Any]="[MASK]" , UpperCamelCase__ : Any=True , UpperCamelCase__ : int=None , **UpperCamelCase__ : Dict , ):
'''simple docstring'''
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , do_lower_case=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , tokenize_chinese_chars=UpperCamelCase__ , strip_accents=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , UpperCamelCase__ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , UpperCamelCase__ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , UpperCamelCase__ ) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE : Optional[Any] = getattr(UpperCamelCase__ , normalizer_state.pop('''type''' ) )
SCREAMING_SNAKE_CASE : Tuple = do_lower_case
SCREAMING_SNAKE_CASE : Dict = strip_accents
SCREAMING_SNAKE_CASE : str = tokenize_chinese_chars
SCREAMING_SNAKE_CASE : Union[str, Any] = normalizer_class(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = do_lower_case
def __A ( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[str]=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __A ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = [self.sep_token_id]
SCREAMING_SNAKE_CASE : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ )
return tuple(UpperCamelCase__ )
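# Hedged worked example of the special-token and segment-id layout built by
# the two methods above, with made-up token ids (cls=101, sep=102, as in
# BERT-style vocabularies).
cls = [101]
sep = [102]
token_ids_first = [7, 8, 9]
token_ids_second = [4, 5]
# single sequence [CLS] A [SEP]: every position belongs to segment 0
assert len(cls + token_ids_first + sep) * [0] == [0, 0, 0, 0, 0]
# pair [CLS] A [SEP] B [SEP]: zeros for the first segment, ones for the second
token_type_ids = len(cls + token_ids_first + sep) * [0] + len(token_ids_second + sep) * [1]
assert token_type_ids == [0, 0, 0, 0, 0, 1, 1, 1]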
| 707
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
__UpperCamelCase : Dict = random.Random()
def A ( _lowercase , _lowercase=1.0 , _lowercase=None , _lowercase=None ):
if rng is None:
SCREAMING_SNAKE_CASE : Any = global_rng
SCREAMING_SNAKE_CASE : int = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
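# Hedged usage note (illustrative names): seeding the Random instance makes
# these ramps reproducible, which is what the module-level global_rng enables.
import random as _rand

def make_ramp(shape, scale=1.0, rng=None):
    rng = rng or _rand.Random(0)
    return [[rng.random() * scale for _ in range(shape[1])] for _ in range(shape[0])]

ramp_a = make_ramp((2, 3), rng=_rand.Random(42))
ramp_b = make_ramp((2, 3), rng=_rand.Random(42))
assert ramp_a == ramp_b                        # same seed, same values
assert len(ramp_a) == 2 and len(ramp_a[0]) == 3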
class lowercase__ ( unittest.TestCase):
def __init__( self : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str=7 , UpperCamelCase__ : Any=400 , UpperCamelCase__ : List[str]=2000 , UpperCamelCase__ : List[Any]=2048 , UpperCamelCase__ : Any=128 , UpperCamelCase__ : Union[str, Any]=1 , UpperCamelCase__ : List[Any]=512 , UpperCamelCase__ : str=30 , UpperCamelCase__ : Tuple=4_4100 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = parent
SCREAMING_SNAKE_CASE : str = batch_size
SCREAMING_SNAKE_CASE : str = min_seq_length
SCREAMING_SNAKE_CASE : Dict = max_seq_length
SCREAMING_SNAKE_CASE : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE : Optional[Any] = spectrogram_length
SCREAMING_SNAKE_CASE : Optional[int] = feature_size
SCREAMING_SNAKE_CASE : Tuple = num_audio_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = hop_length
SCREAMING_SNAKE_CASE : List[Any] = chunk_length
SCREAMING_SNAKE_CASE : str = sampling_rate
def __A ( self : Optional[Any] ):
'''simple docstring'''
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def __A ( self : Tuple , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : Optional[int]=False ):
'''simple docstring'''
def _flatten(UpperCamelCase__ : str ):
return list(itertools.chain(*UpperCamelCase__ ) )
if equal_length:
SCREAMING_SNAKE_CASE : List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE : int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE : Optional[Any] = [np.asarray(UpperCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = TvltFeatureExtractor
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = TvltFeatureExtractionTester(self )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(UpperCamelCase__ , '''spectrogram_length''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''feature_size''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''num_audio_channels''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''hop_length''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''chunk_length''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''sampling_rate''' ) )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract_first.save_pretrained(UpperCamelCase__ )[0]
check_json_file_has_correct_format(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = self.feature_extraction_class.from_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE : str = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE : Optional[int] = dict_first.pop('''mel_filters''' )
SCREAMING_SNAKE_CASE : Optional[int] = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Tuple = os.path.join(UpperCamelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = self.feature_extraction_class.from_json_file(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE : str = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE : int = dict_first.pop('''mel_filters''' )
SCREAMING_SNAKE_CASE : Any = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE : Optional[int] = [np.asarray(UpperCamelCase__ ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
SCREAMING_SNAKE_CASE : Optional[Any] = feature_extractor(UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
SCREAMING_SNAKE_CASE : List[str] = feature_extractor(
UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 , mask_audio=UpperCamelCase__ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE : Optional[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE : Dict = np.asarray(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = feature_extractor(UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def __A ( self : Optional[int] , UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE : Dict = ds.sort('''id''' ).select(range(UpperCamelCase__ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE : int = TvltFeatureExtractor()
SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(UpperCamelCase__ , return_tensors='''pt''' ).audio_values
self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
SCREAMING_SNAKE_CASE : str = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , UpperCamelCase__ , atol=1E-4 ) )
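# Hedged sketch of the dict-comparison pattern used in the save/load tests
# above: the numpy mel filterbank is popped out and compared with allclose,
# because plain dict equality on arrays would raise an ambiguous-truth error.
import numpy as _np

dict_first = {"feature_size": 128, "mel_filters": _np.ones((4, 4))}
dict_second = {"feature_size": 128, "mel_filters": _np.ones((4, 4))}
mel_first = dict_first.pop("mel_filters")
mel_second = dict_second.pop("mel_filters")
assert _np.allclose(mel_first, mel_second)
assert dict_first == dict_second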
| 34
| 0
|
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase__ :
def __init__( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : List[Any]=13 , UpperCamelCase__ : Any=30 , UpperCamelCase__ : Optional[int]=2 , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Tuple=32 , UpperCamelCase__ : List[Any]=5 , UpperCamelCase__ : str=4 , UpperCamelCase__ : Any=37 , UpperCamelCase__ : str="gelu" , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : Optional[int]=10 , UpperCamelCase__ : Optional[int]=0.02 , UpperCamelCase__ : Dict=None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = parent
SCREAMING_SNAKE_CASE : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE : Tuple = image_size
SCREAMING_SNAKE_CASE : List[str] = patch_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE : Tuple = is_training
SCREAMING_SNAKE_CASE : Dict = use_labels
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE : Dict = num_hidden_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = intermediate_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_act
SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Any = type_sequence_label_size
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : Any = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE : Dict = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE : Optional[int] = num_patches + 1
def __A ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Optional[int] = self.get_config()
return config, pixel_values, labels
def __A ( self : Optional[Any] ):
'''simple docstring'''
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def __A ( self : List[str] , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = ViTMSNModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.type_sequence_label_size
SCREAMING_SNAKE_CASE : List[Any] = ViTMSNForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : str = model(UpperCamelCase__ , labels=UpperCamelCase__ )
print(f"""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
print(f"""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE : List[Any] = 1
SCREAMING_SNAKE_CASE : int = ViTMSNForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Optional[Any] = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = config_and_inputs
SCREAMING_SNAKE_CASE : List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowercase__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
UpperCamelCase_ = (
{"""feature-extraction""": ViTMSNModel, """image-classification""": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = ViTMSNModelTester(self )
SCREAMING_SNAKE_CASE : Optional[int] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def __A ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMSN does not use inputs_embeds''' )
def __A ( self : str ):
'''simple docstring'''
pass
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Any = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[int] = model_class(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : List[Any] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Optional[int] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@slow
def __A ( self : Tuple ):
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[Any] = ViTMSNModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def A ( ):
SCREAMING_SNAKE_CASE : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowercase__ ( unittest.TestCase):
@cached_property
def __A ( self : Tuple ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('''facebook/vit-msn-small''' ) if is_vision_available() else None
@slow
def __A ( self : Dict ):
'''simple docstring'''
torch.manual_seed(2 )
SCREAMING_SNAKE_CASE : Optional[int] = ViTMSNForImageClassification.from_pretrained('''facebook/vit-msn-small''' ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = self.default_image_processor
SCREAMING_SNAKE_CASE : Union[str, Any] = prepare_img()
SCREAMING_SNAKE_CASE : Tuple = image_processor(images=UpperCamelCase__ , return_tensors='''pt''' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = model(**UpperCamelCase__ )
# verify the logits
SCREAMING_SNAKE_CASE : Optional[int] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 ) )
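# Hedged worked example of the ViT sequence-length arithmetic used by the
# tester earlier in this file: patches per side squared, plus one [CLS] token.
image_size, patch_size = 30, 2                 # the tester defaults above
num_patches = (image_size // patch_size) ** 2
assert num_patches == 225
assert num_patches + 1 == 226                  # seq_length the model sees
assert (224 // 16) ** 2 + 1 == 197             # the familiar ViT-Base figure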
| 708
|
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class lowercase__ ( UpperCamelCase_ , UpperCamelCase_):
UpperCamelCase_ = 1
@register_to_config
def __init__( self : List[str] , UpperCamelCase__ : int = 1000 , UpperCamelCase__ : Optional[Union[np.ndarray, List[float]]] = None ):
'''simple docstring'''
self.set_timesteps(UpperCamelCase__ )
# standard deviation of the initial noise distribution
SCREAMING_SNAKE_CASE : str = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
SCREAMING_SNAKE_CASE : Tuple = 4
# running values
SCREAMING_SNAKE_CASE : int = []
def __A ( self : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, torch.device] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = num_inference_steps
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
SCREAMING_SNAKE_CASE : Tuple = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
SCREAMING_SNAKE_CASE : int = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = torch.sin(steps * math.pi / 2 ) ** 2
SCREAMING_SNAKE_CASE : Dict = (1.0 - self.betas**2) ** 0.5
SCREAMING_SNAKE_CASE : Optional[Any] = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
SCREAMING_SNAKE_CASE : List[str] = timesteps.to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = []
def __A ( self : Tuple , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True , ):
'''simple docstring'''
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
SCREAMING_SNAKE_CASE : Optional[int] = (self.timesteps == timestep).nonzero().item()
SCREAMING_SNAKE_CASE : Union[str, Any] = timestep_index + 1
SCREAMING_SNAKE_CASE : int = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(UpperCamelCase__ )
if len(self.ets ) == 1:
SCREAMING_SNAKE_CASE : Dict = self.ets[-1]
elif len(self.ets ) == 2:
SCREAMING_SNAKE_CASE : Optional[int] = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
SCREAMING_SNAKE_CASE : str = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
SCREAMING_SNAKE_CASE : Optional[Any] = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
SCREAMING_SNAKE_CASE : Optional[int] = self._get_prev_sample(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase__ )
def __A ( self : Optional[Any] , UpperCamelCase__ : torch.FloatTensor , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
return sample
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.alphas[timestep_index]
SCREAMING_SNAKE_CASE : List[str] = self.betas[timestep_index]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.alphas[prev_timestep_index]
SCREAMING_SNAKE_CASE : Tuple = self.betas[prev_timestep_index]
SCREAMING_SNAKE_CASE : Dict = (sample - sigma * ets) / max(UpperCamelCase__ , 1E-8 )
SCREAMING_SNAKE_CASE : Optional[Any] = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self : int ):
'''simple docstring'''
return self.config.num_train_timesteps
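# Hedged sketch of the linear-multistep ladder in `step` above: the
# 1, (3,-1)/2, (23,-16,5)/12, (55,-59,37,-9)/24 weights are the classic
# Adams-Bashforth coefficients, demonstrated here on the scalar ODE dy/dt = -y.
import math as _math

def ab_combination(ets):
    if len(ets) == 1:
        return ets[-1]
    if len(ets) == 2:
        return (3 * ets[-1] - ets[-2]) / 2
    if len(ets) == 3:
        return (23 * ets[-1] - 16 * ets[-2] + 5 * ets[-3]) / 12
    return (55 * ets[-1] - 59 * ets[-2] + 37 * ets[-3] - 9 * ets[-4]) / 24

y, h, ets = 1.0, 0.01, []
for _ in range(100):
    ets.append(-y)          # the "model output" plays the role of f(t, y)
    ets = ets[-4:]          # only the last four derivatives are needed
    y += h * ab_combination(ets)

assert abs(y - _math.exp(-1.0)) < 1e-4   # y(1) for y(0) = 1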
| 34
| 0
|
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__UpperCamelCase : int = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """detr"""
UpperCamelCase_ = ["""past_key_values"""]
UpperCamelCase_ = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : int , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : str=3 , UpperCamelCase__ : Union[str, Any]=100 , UpperCamelCase__ : Dict=6 , UpperCamelCase__ : str=2048 , UpperCamelCase__ : Dict=8 , UpperCamelCase__ : List[str]=6 , UpperCamelCase__ : Dict=2048 , UpperCamelCase__ : List[Any]=8 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : Optional[int]=0.0 , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : int="relu" , UpperCamelCase__ : Optional[Any]=256 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : int=0.0 , UpperCamelCase__ : Optional[int]=0.0 , UpperCamelCase__ : str=0.02 , UpperCamelCase__ : int=1.0 , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Optional[Any]="sine" , UpperCamelCase__ : List[str]="resnet50" , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Any=False , UpperCamelCase__ : Optional[Any]=1 , UpperCamelCase__ : List[Any]=5 , UpperCamelCase__ : Any=2 , UpperCamelCase__ : List[Any]=1 , UpperCamelCase__ : Tuple=1 , UpperCamelCase__ : str=5 , UpperCamelCase__ : int=2 , UpperCamelCase__ : Tuple=0.1 , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
SCREAMING_SNAKE_CASE : Any = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Optional[Any] = backbone_config.get('''model_type''' )
SCREAMING_SNAKE_CASE : Tuple = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE : int = config_class.from_dict(UpperCamelCase__ )
# set timm attributes to None
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = None, None, None
SCREAMING_SNAKE_CASE : Optional[Any] = use_timm_backbone
SCREAMING_SNAKE_CASE : Tuple = backbone_config
SCREAMING_SNAKE_CASE : int = num_channels
SCREAMING_SNAKE_CASE : List[str] = num_queries
SCREAMING_SNAKE_CASE : Any = d_model
SCREAMING_SNAKE_CASE : Dict = encoder_ffn_dim
SCREAMING_SNAKE_CASE : Any = encoder_layers
SCREAMING_SNAKE_CASE : Dict = encoder_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = decoder_ffn_dim
SCREAMING_SNAKE_CASE : Tuple = decoder_layers
SCREAMING_SNAKE_CASE : int = decoder_attention_heads
SCREAMING_SNAKE_CASE : Tuple = dropout
SCREAMING_SNAKE_CASE : Tuple = attention_dropout
SCREAMING_SNAKE_CASE : Union[str, Any] = activation_dropout
SCREAMING_SNAKE_CASE : Dict = activation_function
SCREAMING_SNAKE_CASE : List[Any] = init_std
SCREAMING_SNAKE_CASE : Union[str, Any] = init_xavier_std
SCREAMING_SNAKE_CASE : Optional[int] = encoder_layerdrop
SCREAMING_SNAKE_CASE : List[str] = decoder_layerdrop
SCREAMING_SNAKE_CASE : List[str] = encoder_layers
SCREAMING_SNAKE_CASE : Optional[int] = auxiliary_loss
SCREAMING_SNAKE_CASE : int = position_embedding_type
SCREAMING_SNAKE_CASE : Optional[Any] = backbone
SCREAMING_SNAKE_CASE : List[str] = use_pretrained_backbone
SCREAMING_SNAKE_CASE : Any = dilation
# Hungarian matcher
SCREAMING_SNAKE_CASE : Optional[int] = class_cost
SCREAMING_SNAKE_CASE : str = bbox_cost
SCREAMING_SNAKE_CASE : List[str] = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE : List[str] = mask_loss_coefficient
SCREAMING_SNAKE_CASE : Optional[Any] = dice_loss_coefficient
SCREAMING_SNAKE_CASE : Dict = bbox_loss_coefficient
SCREAMING_SNAKE_CASE : List[Any] = giou_loss_coefficient
SCREAMING_SNAKE_CASE : Optional[Any] = eos_coefficient
super().__init__(is_encoder_decoder=UpperCamelCase__ , **UpperCamelCase__ )
@property
def __A ( self : Optional[int] ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def __A ( self : Any ):
'''simple docstring'''
return self.d_model
@classmethod
def __A ( cls : Union[str, Any] , UpperCamelCase__ : PretrainedConfig , **UpperCamelCase__ : List[str] ):
'''simple docstring'''
return cls(backbone_config=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
SCREAMING_SNAKE_CASE : int = self.backbone_config.to_dict()
SCREAMING_SNAKE_CASE : Optional[Any] = self.__class__.model_type
return output
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = version.parse("""1.11""")
@property
def __A ( self : Optional[Any] ):
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def __A ( self : Optional[Any] ):
'''simple docstring'''
return 1E-5
@property
def __A ( self : int ):
'''simple docstring'''
return 12
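# Hedged sketch of the nested-config serialization in `to_dict` above: the
# backbone sub-config is itself flattened to a plain dict so the whole object
# survives a JSON round trip. Both toy classes below are illustrative only.
import copy as _copy
import json as _json

class ToyBackboneConfig:
    model_type = "resnet"
    def __init__(self):
        self.depths = [3, 4, 6, 3]
    def to_dict(self):
        return {"model_type": self.model_type, "depths": self.depths}

class ToyDetrConfig:
    model_type = "detr"
    def __init__(self):
        self.d_model = 256
        self.backbone_config = ToyBackboneConfig()
    def to_dict(self):
        output = _copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.model_type
        return output

round_trip = _json.loads(_json.dumps(ToyDetrConfig().to_dict()))
assert round_trip["backbone_config"]["model_type"] == "resnet"
assert round_trip["d_model"] == 256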
| 709
|
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowercase__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = IFPipeline
UpperCamelCase_ = TEXT_TO_IMAGE_PARAMS - {"""width""", """height""", """latents"""}
UpperCamelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase_ = PipelineTesterMixin.required_optional_params - {"""latents"""}
def __A ( self : Tuple ):
'''simple docstring'''
return self._get_dummy_components()
def __A ( self : int , UpperCamelCase__ : Dict , UpperCamelCase__ : int=0 ):
'''simple docstring'''
if str(UpperCamelCase__ ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : Dict = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __A ( self : List[str] ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def __A ( self : Any ):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __A ( self : List[Any] ):
'''simple docstring'''
self._test_save_load_local()
def __A ( self : List[str] ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __A ( self : Tuple ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase):
def __A ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : str = IFSuperResolutionPipeline.from_pretrained(
'''DeepFloyd/IF-II-L-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('''cuda''' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = pipe_a.encode_prompt('''anime turtle''' , device='''cuda''' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : str = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
SCREAMING_SNAKE_CASE : Optional[int] = IFImgaImgPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE : Optional[int] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
SCREAMING_SNAKE_CASE : Tuple = IFInpaintingPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE : Optional[int] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict ):
'''simple docstring'''
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , num_inference_steps=2 , generator=UpperCamelCase__ , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Optional[int] = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
SCREAMING_SNAKE_CASE : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Tuple = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str ):
'''simple docstring'''
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , num_inference_steps=2 , generator=UpperCamelCase__ , output_type='''np''' , )
SCREAMING_SNAKE_CASE : int = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE : Optional[int] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , original_image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Optional[int] = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : int = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , mask_image=UpperCamelCase__ , num_inference_steps=2 , generator=UpperCamelCase__ , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Any = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : str = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , mask_image=UpperCamelCase__ , original_image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Optional[Any] = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
def A ( ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
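# Hedged sketch of how the helper above is meant to be used: reset the CUDA
# allocator's counters, run the workload, then read the peak. Guarded so the
# example is a no-op on machines without a GPU; the workload is illustrative.
if torch.cuda.is_available():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
    x = torch.randn(1024, 1024, device="cuda")
    y = x @ x               # the workload being measured
    mem_bytes = torch.cuda.max_memory_allocated()
    assert mem_bytes >= 2 * x.numel() * x.element_size()  # x and y both live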
| 34
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : List[Any] = logging.get_logger(__name__)
__UpperCamelCase : Union[str, Any] = '▁'
__UpperCamelCase : Tuple = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
__UpperCamelCase : Tuple = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
__UpperCamelCase : Tuple = {'vinai/bartpho-syllable': 1024}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ["""input_ids""", """attention_mask"""]
def __init__( self : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict="<s>" , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : Dict="</s>" , UpperCamelCase__ : Tuple="<s>" , UpperCamelCase__ : str="<unk>" , UpperCamelCase__ : Dict="<pad>" , UpperCamelCase__ : Tuple="<mask>" , UpperCamelCase__ : Optional[Dict[str, Any]] = None , **UpperCamelCase__ : Tuple , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
SCREAMING_SNAKE_CASE : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Optional[int] = vocab_file
SCREAMING_SNAKE_CASE : Any = monolingual_vocab_file
SCREAMING_SNAKE_CASE : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase__ ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
SCREAMING_SNAKE_CASE : Dict = {}
SCREAMING_SNAKE_CASE : List[Any] = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(UpperCamelCase__ ) not in self.fairseq_tokens_to_ids:
SCREAMING_SNAKE_CASE : str = cnt
cnt += 1
with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
SCREAMING_SNAKE_CASE : Union[str, Any] = line.strip().split()[0]
SCREAMING_SNAKE_CASE : int = len(self.fairseq_tokens_to_ids )
if str(UpperCamelCase__ ) not in self.fairseq_tokens_to_ids:
SCREAMING_SNAKE_CASE : Any = len(self.fairseq_tokens_to_ids )
SCREAMING_SNAKE_CASE : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.__dict__.copy()
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : str = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Optional[Any] , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
SCREAMING_SNAKE_CASE : List[Any] = {}
SCREAMING_SNAKE_CASE : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __A ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id]
SCREAMING_SNAKE_CASE : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __A ( self : Tuple , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase__ )) + [1]
return [1] + ([0] * len(UpperCamelCase__ )) + [1, 1] + ([0] * len(UpperCamelCase__ )) + [1]
def __A ( self : int , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __A ( self : int ):
'''simple docstring'''
return len(self.fairseq_ids_to_tokens )
def __A ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __A ( self : List[str] , UpperCamelCase__ : str ):
'''simple docstring'''
return self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )
def __A ( self : Optional[int] , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def __A ( self : str , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return self.fairseq_ids_to_tokens[index]
def __A ( self : Any , UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = ''''''.join(UpperCamelCase__ ).replace(UpperCamelCase__ , ''' ''' ).strip()
return out_string
def __A ( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE : Dict = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase__ , '''wb''' ) as fi:
SCREAMING_SNAKE_CASE : Any = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
UpperCamelCase__ ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , UpperCamelCase__ )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f"""{str(UpperCamelCase__ )} \n""" )
return out_vocab_file, out_monolingual_vocab_file
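# Hedged sketch of the reduced-vocab construction in __init__ above: special
# tokens claim the first ids, then each line of a fairseq-style dict file
# ("token count") appends the next free id. Tokens below are made up.
special_tokens = ["<s>", "<pad>", "</s>", "<unk>"]
dict_lines = ["xin 120", "chao 95", "<s> 1"]  # "<s>" is already registered

tokens_to_ids = {}
for token in special_tokens:
    if token not in tokens_to_ids:
        tokens_to_ids[token] = len(tokens_to_ids)
for line in dict_lines:
    token = line.strip().split()[0]
    if token not in tokens_to_ids:
        tokens_to_ids[token] = len(tokens_to_ids)

assert tokens_to_ids == {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "xin": 4, "chao": 5}
ids_to_tokens = {v: k for k, v in tokens_to_ids.items()}
assert ids_to_tokens[4] == "xin"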
| 710
|
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
__UpperCamelCase : int = logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple):
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)
    return (new_height, new_width)
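# --- Added worked example (not part of the original file; values are illustrative) ---
# Sketch of the sizing logic above for a 480x640 channels-first image targeted
# at 384x384 with keep_aspect_ratio=True and multiple=32:
#
#   >>> image = np.zeros((3, 480, 640))
#   >>> get_resize_output_image_size(image, (384, 384), True, 32)
#   (384, 512)
#
# scale_height = 384/480 = 0.8 and scale_width = 384/640 = 0.6; since
# |1 - 0.6| > |1 - 0.8| the height scale wins ("fit height"), giving
# 0.8 * 640 = 512 for the width, which is already a multiple of 32.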
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = ["""pixel_values"""]
def __init__( self : int , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase__ : bool = False , UpperCamelCase__ : int = 1 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = size if size is not None else {'''height''': 384, '''width''': 384}
SCREAMING_SNAKE_CASE : Any = get_size_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = do_resize
SCREAMING_SNAKE_CASE : Any = size
SCREAMING_SNAKE_CASE : str = keep_aspect_ratio
SCREAMING_SNAKE_CASE : List[str] = ensure_multiple_of
SCREAMING_SNAKE_CASE : int = resample
SCREAMING_SNAKE_CASE : Any = do_rescale
SCREAMING_SNAKE_CASE : List[Any] = rescale_factor
SCREAMING_SNAKE_CASE : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __A ( self : Optional[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : bool = False , UpperCamelCase__ : int = 1 , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = get_size_dict(UpperCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE : Any = get_resize_output_image_size(
UpperCamelCase__ , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=UpperCamelCase__ , multiple=UpperCamelCase__ , )
return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Dict , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ):
'''simple docstring'''
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : List[str] , ):
'''simple docstring'''
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Optional[Any] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : float = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE : Optional[Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
SCREAMING_SNAKE_CASE : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
SCREAMING_SNAKE_CASE : Tuple = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE : str = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE : List[Any] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE : List[Any] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE : Dict = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE : Tuple = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE : Dict = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE : Any = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE : Any = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images]
SCREAMING_SNAKE_CASE : Optional[int] = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
SCREAMING_SNAKE_CASE : Tuple = {'''pixel_values''': images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
def __A ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Tuple] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = target_sizes.numpy()
SCREAMING_SNAKE_CASE : Optional[int] = []
for idx in range(len(UpperCamelCase__ ) ):
SCREAMING_SNAKE_CASE : List[str] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : List[Any] = logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE : List[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
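# --- Added usage sketch (not part of the original file) ---
# Typical round trip through the image processor above, assuming the class is
# exported under a name like `DPTImageProcessor` (hypothetical name here):
#
#   >>> from PIL import Image
#   >>> processor = DPTImageProcessor(size={"height": 384, "width": 384})
#   >>> inputs = processor(images=Image.new("RGB", (640, 480)), return_tensors="pt")
#   >>> inputs["pixel_values"].shape
#   torch.Size([1, 3, 384, 384])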
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
__UpperCamelCase : Optional[Any] = False
class lowercase__ ( unittest.TestCase):
def __A ( self : Optional[int] , UpperCamelCase__ : List[Any]=32 ):
'''simple docstring'''
set_seed(0 )
SCREAMING_SNAKE_CASE : Any = UNetaDModel(sample_size=UpperCamelCase__ , in_channels=3 , out_channels=3 )
SCREAMING_SNAKE_CASE : Tuple = torch.optim.SGD(model.parameters() , lr=0.0001 )
return model, optimizer
@slow
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = '''cpu''' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
SCREAMING_SNAKE_CASE : int = DDPMScheduler(
num_train_timesteps=1000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Optional[int] = DDIMScheduler(
num_train_timesteps=1000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=UpperCamelCase__ , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(UpperCamelCase__ ) for _ in range(4 )]
SCREAMING_SNAKE_CASE : int = [torch.randn((4, 3, 32, 32) ).to(UpperCamelCase__ ) for _ in range(4 )]
SCREAMING_SNAKE_CASE : Optional[Any] = [torch.randint(0 , 1000 , (4,) ).long().to(UpperCamelCase__ ) for _ in range(4 )]
# train with a DDPM scheduler
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_model_optimizer(resolution=32 )
model.train().to(UpperCamelCase__ )
for i in range(4 ):
optimizer.zero_grad()
SCREAMING_SNAKE_CASE : Union[str, Any] = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
SCREAMING_SNAKE_CASE : Tuple = model(UpperCamelCase__ , timesteps[i] ).sample
SCREAMING_SNAKE_CASE : str = torch.nn.functional.mse_loss(UpperCamelCase__ , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_model_optimizer(resolution=32 )
model.train().to(UpperCamelCase__ )
for i in range(4 ):
optimizer.zero_grad()
SCREAMING_SNAKE_CASE : int = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
SCREAMING_SNAKE_CASE : Any = model(UpperCamelCase__ , timesteps[i] ).sample
SCREAMING_SNAKE_CASE : Dict = torch.nn.functional.mse_loss(UpperCamelCase__ , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-5 ) )
self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-5 ) )
import random
def _partition(data, pivot):
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater
def quick_select(items, index):
    # To find the median, call with index = len(items) // 2
    # (the value sitting at that index once items is sorted).
    # guard against an invalid index
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
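# --- Added usage example (not part of the original module) ---
# quick_select returns the value that would sit at `index` in sorted order in
# expected O(n) time; index len(items) // 2 yields the median of an odd-length list.
if __name__ == "__main__":
    sample = [7, 1, 9, 4, 3, 8, 2]  # sorted: [1, 2, 3, 4, 7, 8, 9]
    assert quick_select(sample, 0) == 1  # minimum
    assert quick_select(sample, len(sample) // 2) == 4  # median
    assert quick_select(sample, len(sample) - 1) == 9  # maximum
    print("quick_select sanity checks passed")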
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = PhobertTokenizer
UpperCamelCase_ = False
def __A ( self : Optional[Any] ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE : Dict = ['''T@@''', '''i''', '''I''', '''R@@''', '''r''', '''e@@''']
SCREAMING_SNAKE_CASE : int = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
SCREAMING_SNAKE_CASE : Any = ['''#version: 0.2''', '''l à</w>''']
SCREAMING_SNAKE_CASE : List[Any] = {'''unk_token''': '''<unk>'''}
SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
for token in vocab_tokens:
fp.write(f"""{token} {vocab_tokens[token]}\n""" )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCamelCase__ ) )
def __A ( self : List[Any] , **UpperCamelCase__ : str ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return PhobertTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self : Optional[int] , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = '''Tôi là VinAI Research'''
SCREAMING_SNAKE_CASE : Optional[int] = '''T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'''
return input_text, output_text
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE : str = '''Tôi là VinAI Research'''
SCREAMING_SNAKE_CASE : Optional[Any] = '''T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'''.split()
SCREAMING_SNAKE_CASE : int = tokenizer.tokenize(UpperCamelCase__ )
print(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE : List[str] = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , UpperCamelCase__ )
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Tuple = logging.get_logger(__name__)
# TODO Update this
__UpperCamelCase : List[str] = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """esm"""
def __init__( self : Tuple , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : int=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Any=768 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : str=12 , UpperCamelCase__ : Optional[int]=3072 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Union[str, Any]=1026 , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : Any=1E-12 , UpperCamelCase__ : Dict="absolute" , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : int=None , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Optional[Any]=None , **UpperCamelCase__ : Any , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__ , mask_token_id=UpperCamelCase__ , **UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : Any = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Dict = num_attention_heads
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps
SCREAMING_SNAKE_CASE : Dict = position_embedding_type
SCREAMING_SNAKE_CASE : Any = use_cache
SCREAMING_SNAKE_CASE : Dict = emb_layer_norm_before
SCREAMING_SNAKE_CASE : List[str] = token_dropout
SCREAMING_SNAKE_CASE : List[Any] = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('''No esmfold_config supplied for folding model, using default values.''' )
SCREAMING_SNAKE_CASE : List[Any] = EsmFoldConfig()
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = EsmFoldConfig(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = esmfold_config
if vocab_list is None:
logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
SCREAMING_SNAKE_CASE : Optional[int] = get_default_vocab_list()
else:
SCREAMING_SNAKE_CASE : Optional[Any] = vocab_list
else:
SCREAMING_SNAKE_CASE : str = None
SCREAMING_SNAKE_CASE : int = None
if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCamelCase__ ):
raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = super().to_dict()
if isinstance(self.esmfold_config , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Optional[Any] = self.esmfold_config.to_dict()
return output
@dataclass
class lowercase__ :
UpperCamelCase_ = None
UpperCamelCase_ = True
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = 0
UpperCamelCase_ = True
UpperCamelCase_ = False
UpperCamelCase_ = 128
UpperCamelCase_ = None
def __A ( self : Optional[int] ):
'''simple docstring'''
if self.trunk is None:
SCREAMING_SNAKE_CASE : Optional[Any] = TrunkConfig()
elif isinstance(self.trunk , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Tuple = TrunkConfig(**self.trunk )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = asdict(self )
SCREAMING_SNAKE_CASE : Tuple = self.trunk.to_dict()
return output
@dataclass
class lowercase__ :
UpperCamelCase_ = 48
UpperCamelCase_ = 1_024
UpperCamelCase_ = 128
UpperCamelCase_ = 32
UpperCamelCase_ = 32
UpperCamelCase_ = 32
UpperCamelCase_ = 0
UpperCamelCase_ = 0
UpperCamelCase_ = False
UpperCamelCase_ = 4
UpperCamelCase_ = 128
UpperCamelCase_ = None
def __A ( self : Any ):
'''simple docstring'''
if self.structure_module is None:
SCREAMING_SNAKE_CASE : Optional[int] = StructureModuleConfig()
elif isinstance(self.structure_module , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Optional[Any] = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""" )
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
'''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
f""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
'''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
f""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )
SCREAMING_SNAKE_CASE : Dict = self.sequence_state_dim // self.sequence_head_width
SCREAMING_SNAKE_CASE : Tuple = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = asdict(self )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.structure_module.to_dict()
return output
@dataclass
class lowercase__ :
UpperCamelCase_ = 384
UpperCamelCase_ = 128
UpperCamelCase_ = 16
UpperCamelCase_ = 128
UpperCamelCase_ = 12
UpperCamelCase_ = 4
UpperCamelCase_ = 8
UpperCamelCase_ = 0.1
UpperCamelCase_ = 8
UpperCamelCase_ = 1
UpperCamelCase_ = 2
UpperCamelCase_ = 7
UpperCamelCase_ = 10
UpperCamelCase_ = 1E-8
UpperCamelCase_ = 1E5
def __A ( self : Dict ):
'''simple docstring'''
return asdict(self )
def A ( ):
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_DOCS = 'docs/source/en'
REPO_PATH = '.'
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
__UpperCamelCase : Tuple = 'Model|Encoder|Decoder|ForConditionalGeneration'
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will also match any TF or Flax model, so it needs to be tried in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a camel-cased `identifier` into words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def _center_text(text, width):
    # The check/cross emojis render two columns wide, so count them as length 2.
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
SCREAMING_SNAKE_CASE : Tuple = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
SCREAMING_SNAKE_CASE : List[Any] = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
SCREAMING_SNAKE_CASE : Optional[int] = {name: config.replace('''Config''' , '''''' ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
SCREAMING_SNAKE_CASE : List[Any] = collections.defaultdict(_lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = collections.defaultdict(_lowercase )
SCREAMING_SNAKE_CASE : Union[str, Any] = collections.defaultdict(_lowercase )
SCREAMING_SNAKE_CASE : Any = collections.defaultdict(_lowercase )
SCREAMING_SNAKE_CASE : int = collections.defaultdict(_lowercase )
# Let's lookup through all transformers object (once).
for attr_name in dir(transformers_module ):
SCREAMING_SNAKE_CASE : Any = None
if attr_name.endswith('''Tokenizer''' ):
SCREAMING_SNAKE_CASE : Optional[int] = slow_tokenizers
SCREAMING_SNAKE_CASE : Any = attr_name[:-9]
elif attr_name.endswith('''TokenizerFast''' ):
SCREAMING_SNAKE_CASE : str = fast_tokenizers
SCREAMING_SNAKE_CASE : Union[str, Any] = attr_name[:-13]
elif _re_tf_models.match(_lowercase ) is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = tf_models
SCREAMING_SNAKE_CASE : int = _re_tf_models.match(_lowercase ).groups()[0]
elif _re_flax_models.match(_lowercase ) is not None:
SCREAMING_SNAKE_CASE : Any = flax_models
SCREAMING_SNAKE_CASE : str = _re_flax_models.match(_lowercase ).groups()[0]
elif _re_pt_models.match(_lowercase ) is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = pt_models
SCREAMING_SNAKE_CASE : Optional[Any] = _re_pt_models.match(_lowercase ).groups()[0]
if lookup_dict is not None:
while len(_lowercase ) > 0:
if attr_name in model_name_to_prefix.values():
SCREAMING_SNAKE_CASE : Optional[Any] = True
break
# Try again after removing the last word in the name
SCREAMING_SNAKE_CASE : Dict = ''''''.join(camel_case_split(_lowercase )[:-1] )
# Let's build that table!
SCREAMING_SNAKE_CASE : Union[str, Any] = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
SCREAMING_SNAKE_CASE : int = ['''Model''', '''Tokenizer slow''', '''Tokenizer fast''', '''PyTorch support''', '''TensorFlow support''', '''Flax Support''']
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
SCREAMING_SNAKE_CASE : Optional[Any] = [len(_lowercase ) + 2 for c in columns]
SCREAMING_SNAKE_CASE : str = max([len(_lowercase ) for name in model_names] ) + 2
# Build the table per se
SCREAMING_SNAKE_CASE : Any = '''|''' + '''|'''.join([_center_text(_lowercase , _lowercase ) for c, w in zip(_lowercase , _lowercase )] ) + '''|\n'''
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([''':''' + '''-''' * (w - 2) + ''':''' for w in widths] ) + "|\n"
SCREAMING_SNAKE_CASE : str = {True: '''✅''', False: '''❌'''}
for name in model_names:
SCREAMING_SNAKE_CASE : Union[str, Any] = model_name_to_prefix[name]
SCREAMING_SNAKE_CASE : List[Any] = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(_lowercase , _lowercase ) for l, w in zip(_lowercase , _lowercase )] ) + "|\n"
return table
def check_model_table(overwrite=False):
    """Check the model table in `index.md` is consistent with the state of the lib and maybe fix it."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()
    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )
if __name__ == "__main__":
__UpperCamelCase : str = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
__UpperCamelCase : str = parser.parse_args()
check_model_table(args.fix_and_overwrite)
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""")) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""")
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
])
class lowercase__ ( unittest.TestCase):
def __A ( self : Any ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=UpperCamelCase__ , )
assert hasattr(self , '''env''' )
def __A ( self : str , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = f"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"""
# distributed data settings
SCREAMING_SNAKE_CASE : Any = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=UpperCamelCase__ , instance_count=UpperCamelCase__ , instance_type=self.instance_type , debugger_hook_config=UpperCamelCase__ , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=UpperCamelCase__ , py_version='''py36''' , )
def __A ( self : Optional[Any] , UpperCamelCase__ : List[str] ):
'''simple docstring'''
TrainingJobAnalytics(UpperCamelCase__ ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def __A ( self : Tuple , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.create_estimator(UpperCamelCase__ )
# run training
estimator.fit()
# result dataframe
SCREAMING_SNAKE_CASE : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
SCREAMING_SNAKE_CASE : List[str] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
SCREAMING_SNAKE_CASE : Dict = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
SCREAMING_SNAKE_CASE : List[Any] = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , UpperCamelCase__ )
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time, burst_time, no_of_processes):
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # While processes remain incomplete: a process whose arrival time has
    # passed and which still has remaining execution time is put into
    # ready_process, and the shortest job in ready_process (target_process)
    # is executed next.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time, no_of_processes, waiting_time):
    turn_around_time = [0] * no_of_processes
    # Turnaround time of each process is its burst time plus its waiting time.
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print('[TEST CASE 01]')
__UpperCamelCase : List[Any] = 4
__UpperCamelCase : Any = [2, 5, 3, 7]
__UpperCamelCase : Union[str, Any] = [0, 0, 0, 0]
__UpperCamelCase : List[Any] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__UpperCamelCase : Optional[int] = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print('PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time')
for i, process_id in enumerate(list(range(1, 5))):
print(
f"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
f"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(f"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(f"""Average turnaround time = {mean(turn_around_time):.5f}""")
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__UpperCamelCase : Dict = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__UpperCamelCase : Tuple = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f"""{len(upper_files)} files contain uppercase characters:""")
print('\n'.join(upper_files) + '\n')
__UpperCamelCase : List[Any] = [file for file in filepaths if ' ' in file]
if space_files:
print(f"""{len(space_files)} files contain space characters:""")
print('\n'.join(space_files) + '\n')
__UpperCamelCase : List[Any] = [file for file in filepaths if '-' in file]
if hyphen_files:
print(f"""{len(hyphen_files)} files contain hyphen characters:""")
print('\n'.join(hyphen_files) + '\n')
__UpperCamelCase : List[Any] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f"""{len(nodir_files)} files are not in a directory:""")
print('\n'.join(nodir_files) + '\n')
__UpperCamelCase : Optional[Any] = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a nested list of random floats with the given 2D shape."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
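# --- Added usage example (not part of the original test file) ---
# floats_list builds a nested list of random floats with the given 2D shape;
# a seeded Random instance makes the output reproducible:
#   >>> import random
#   >>> vals = floats_list((2, 3), scale=0.5, rng=random.Random(0))
#   >>> (len(vals), len(vals[0]))
#   (2, 3)
#   >>> all(0.0 <= v < 0.5 for row in vals for v in row)
#   True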
class lowercase__ ( unittest.TestCase):
def __init__( self : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str=7 , UpperCamelCase__ : Any=400 , UpperCamelCase__ : List[str]=2000 , UpperCamelCase__ : List[Any]=2048 , UpperCamelCase__ : Any=128 , UpperCamelCase__ : Union[str, Any]=1 , UpperCamelCase__ : List[Any]=512 , UpperCamelCase__ : str=30 , UpperCamelCase__ : Tuple=4_4100 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = parent
SCREAMING_SNAKE_CASE : str = batch_size
SCREAMING_SNAKE_CASE : str = min_seq_length
SCREAMING_SNAKE_CASE : Dict = max_seq_length
SCREAMING_SNAKE_CASE : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE : Optional[Any] = spectrogram_length
SCREAMING_SNAKE_CASE : Optional[int] = feature_size
SCREAMING_SNAKE_CASE : Tuple = num_audio_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = hop_length
SCREAMING_SNAKE_CASE : List[Any] = chunk_length
SCREAMING_SNAKE_CASE : str = sampling_rate
def __A ( self : Optional[Any] ):
'''simple docstring'''
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def __A ( self : Tuple , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : Optional[int]=False ):
'''simple docstring'''
def _flatten(UpperCamelCase__ : str ):
return list(itertools.chain(*UpperCamelCase__ ) )
if equal_length:
SCREAMING_SNAKE_CASE : List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE : int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE : Optional[Any] = [np.asarray(UpperCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = TvltFeatureExtractor
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = TvltFeatureExtractionTester(self )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(UpperCamelCase__ , '''spectrogram_length''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''feature_size''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''num_audio_channels''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''hop_length''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''chunk_length''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''sampling_rate''' ) )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract_first.save_pretrained(UpperCamelCase__ )[0]
check_json_file_has_correct_format(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = self.feature_extraction_class.from_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE : str = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE : Optional[int] = dict_first.pop('''mel_filters''' )
SCREAMING_SNAKE_CASE : Optional[int] = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Tuple = os.path.join(UpperCamelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = self.feature_extraction_class.from_json_file(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE : str = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE : int = dict_first.pop('''mel_filters''' )
SCREAMING_SNAKE_CASE : Any = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE : Optional[int] = [np.asarray(UpperCamelCase__ ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
SCREAMING_SNAKE_CASE : Optional[Any] = feature_extractor(UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
SCREAMING_SNAKE_CASE : List[str] = feature_extractor(
UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 , mask_audio=UpperCamelCase__ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE : Optional[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE : Dict = np.asarray(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = feature_extractor(UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def __A ( self : Optional[int] , UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE : Dict = ds.sort('''id''' ).select(range(UpperCamelCase__ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE : int = TvltFeatureExtractor()
SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(UpperCamelCase__ , return_tensors='''pt''' ).audio_values
self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
SCREAMING_SNAKE_CASE : str = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , UpperCamelCase__ , atol=1E-4 ) )
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
__UpperCamelCase : Dict = None
__UpperCamelCase : Tuple = logging.get_logger(__name__)
__UpperCamelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__UpperCamelCase : Optional[int] = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
__UpperCamelCase : Union[str, Any] = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ["""input_ids""", """attention_mask"""]
UpperCamelCase_ = TaTokenizer
UpperCamelCase_ = []
def __init__( self : str , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : str="<unk>" , UpperCamelCase__ : Optional[int]="<pad>" , UpperCamelCase__ : Optional[Any]=100 , UpperCamelCase__ : List[Any]=None , **UpperCamelCase__ : str , ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
SCREAMING_SNAKE_CASE : List[str] = [f"""<extra_id_{i}>""" for i in range(UpperCamelCase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
SCREAMING_SNAKE_CASE : int = len(set(filter(lambda UpperCamelCase__ : bool('''extra_id_''' in str(UpperCamelCase__ ) ) , UpperCamelCase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , extra_ids=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : str = vocab_file
SCREAMING_SNAKE_CASE : int = False if not self.vocab_file else True
SCREAMING_SNAKE_CASE : str = extra_ids
@staticmethod
def __A ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
SCREAMING_SNAKE_CASE : List[str] = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f""" {pretrained_model_name_or_path} automatically truncating your input to"""
f""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
f""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , UpperCamelCase__ , )
return max_model_length
def __A ( self : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE : Any = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ):
copyfile(self.vocab_file , UpperCamelCase__ )
logger.info(f"""Copy vocab file to {out_vocab_file}""" )
return (out_vocab_file,)
def __A ( self : Optional[Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
SCREAMING_SNAKE_CASE : Tuple = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def __A ( self : Any , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __A ( self : Dict ):
'''simple docstring'''
return list(
set(filter(lambda UpperCamelCase__ : bool(re.search(r'''<extra_id_\d+>''' , UpperCamelCase__ ) ) is not None , self.additional_special_tokens ) ) )
def __A ( self : List[Any] ):
'''simple docstring'''
return [self.convert_tokens_to_ids(UpperCamelCase__ ) for token in self.get_sentinel_tokens()]
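# --- Added usage sketch (not part of the original file) ---
# The sentinel helpers above resolve the <extra_id_*> tokens to ids at the top
# of the vocabulary; e.g. for t5-small (assuming the class above is exported
# as T5TokenizerFast):
#   >>> tok = T5TokenizerFast.from_pretrained("t5-small")
#   >>> tok.convert_tokens_to_ids("<extra_id_0>")
#   32099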
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : str = logging.get_logger(__name__)
def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = "std_conv" if "bit" in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)
    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val
    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits
    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
__UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
__UpperCamelCase : Optional[int] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
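# --- Added usage note (not part of the original script; file name hypothetical) ---
# Example invocation, assuming timm is installed and the output directory is
# writable:
#
#   python convert_bit_to_pytorch.py \
#       --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-50-converted
#
# Add --push_to_hub to also upload the converted model and processor.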
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__UpperCamelCase : str = False
class lowercase__ ( unittest.TestCase):
pass
@nightly
@require_torch_gpu
class lowercase__ ( unittest.TestCase):
def __A ( self : Union[str, Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = pipe.dual_guided(
prompt='''first prompt''' , image=UpperCamelCase__ , text_to_image_strength=0.75 , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = VersatileDiffusionPipeline.from_pretrained(UpperCamelCase__ , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = pipe.dual_guided(
prompt='''first prompt''' , image=UpperCamelCase__ , text_to_image_strength=0.75 , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = '''cyberpunk 2077'''
SCREAMING_SNAKE_CASE : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = pipe.dual_guided(
prompt=UpperCamelCase__ , image=UpperCamelCase__ , text_to_image_strength=0.75 , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
SCREAMING_SNAKE_CASE : Tuple = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
SCREAMING_SNAKE_CASE : Optional[Any] = '''A painting of a squirrel eating a burger '''
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = pipe.text_to_image(
prompt=UpperCamelCase__ , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE : List[str] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Any = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
SCREAMING_SNAKE_CASE : Optional[Any] = pipe.image_variation(UpperCamelCase__ , generator=UpperCamelCase__ , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE : Optional[int] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 34
| 0
|
'''simple docstring'''
def A ( _lowercase ): # noqa: E741
SCREAMING_SNAKE_CASE : str = len(_lowercase )
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
SCREAMING_SNAKE_CASE : Any = [0] * n
SCREAMING_SNAKE_CASE : Dict = [False] * n
SCREAMING_SNAKE_CASE : Optional[Any] = [False] * n
def dfs(_lowercase , _lowercase , _lowercase , _lowercase ):
if parent == root:
out_edge_count += 1
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : Optional[Any] = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
SCREAMING_SNAKE_CASE : Optional[int] = dfs(_lowercase , _lowercase , _lowercase , _lowercase )
SCREAMING_SNAKE_CASE : Dict = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
SCREAMING_SNAKE_CASE : str = True
# AP found via cycle
if at == low[to]:
SCREAMING_SNAKE_CASE : int = True
else:
SCREAMING_SNAKE_CASE : List[str] = min(low[at] , _lowercase )
return out_edge_count
for i in range(_lowercase ):
if not visited[i]:
SCREAMING_SNAKE_CASE : str = 0
SCREAMING_SNAKE_CASE : Dict = dfs(_lowercase , _lowercase , -1 , _lowercase )
SCREAMING_SNAKE_CASE : str = out_edge_count > 1
for x in range(len(_lowercase ) ):
if is_art[x] is True:
print(_lowercase )
# Adjacency list of graph
__UpperCamelCase : str = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
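# A minimal, readable sketch of the same DFS low-link idea used by the code
# above, kept separate so the algorithm is easy to follow; all names below are
# illustrative and do not mirror the identifiers in this file.
def _articulation_points_sketch(graph):
    n = len(graph)
    visited = [False] * n
    order = [0] * n  # DFS discovery time of each node
    low = [0] * n  # lowest discovery time reachable from the node's subtree
    result = set()
    timer = 0

    def dfs(node, parent):
        nonlocal timer
        visited[node] = True
        order[node] = low[node] = timer
        timer += 1
        children = 0
        for to in graph[node]:
            if to == parent:
                continue
            if visited[to]:
                low[node] = min(low[node], order[to])  # back edge
            else:
                dfs(to, node)
                low[node] = min(low[node], low[to])
                if parent != -1 and low[to] >= order[node]:
                    result.add(node)  # non-root articulation point
                children += 1
        if parent == -1 and children > 1:
            result.add(node)  # root with more than one DFS child

    for start in range(n):
        if not visited[start]:
            dfs(start, -1)
    return result

# For the adjacency list above, the articulation points are 2, 3 and 5.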
| 717
|
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def A ( _lowercase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = analyze_text(_lowercase )
SCREAMING_SNAKE_CASE : Any = list(''' ''' + ascii_lowercase )
    # total number of single-character occurrences (the probability normalizer)
    SCREAMING_SNAKE_CASE : Tuple = sum(single_char_strings.values() )
    # entropy over single-character strings
    SCREAMING_SNAKE_CASE : Tuple = 0
    # for each character of the alphabet, add its entropy contribution if it occurs in the text
    for ch in my_alphas:
if ch in single_char_strings:
SCREAMING_SNAKE_CASE : Tuple = single_char_strings[ch]
SCREAMING_SNAKE_CASE : List[str] = my_str / all_sum
my_fir_sum += prob * math.loga(_lowercase ) # entropy formula.
# print entropy
print(f"""{round(-1 * my_fir_sum ):.1f}""" )
    # two-character strings
    SCREAMING_SNAKE_CASE : Optional[Any] = sum(two_char_strings.values() )
    SCREAMING_SNAKE_CASE : List[str] = 0
    # for each two-character sequence, add its entropy contribution
    for cha in my_alphas:
        for chb in my_alphas:
            SCREAMING_SNAKE_CASE : Union[str, Any] = cha + chb
if sequence in two_char_strings:
SCREAMING_SNAKE_CASE : Any = two_char_strings[sequence]
SCREAMING_SNAKE_CASE : Dict = int(_lowercase ) / all_sum
my_sec_sum += prob * math.loga(_lowercase )
# print second entropy
print(f"""{round(-1 * my_sec_sum ):.1f}""" )
# print the difference between them
print(f"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Tuple = Counter() # type: ignore
SCREAMING_SNAKE_CASE : Any = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(_lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def A ( ):
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
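def _entropy_example():
    # A minimal, self-contained sketch of the single-character entropy
    # computed above; the sample string is illustrative only.
    from collections import Counter

    text = "abracadabra"
    counts = Counter(text)
    total = sum(counts.values())
    # Shannon entropy in bits per character, H = -sum(p * log2(p));
    # roughly 2.04 bits for this string
    return -sum((c / total) * math.log2(c / total) for c in counts.values())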
| 34
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class lowercase__ ( unittest.TestCase):
def __init__( self : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any]=13 , UpperCamelCase__ : Optional[int]=7 , UpperCamelCase__ : str=True , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Tuple=99 , UpperCamelCase__ : Tuple=32 , UpperCamelCase__ : List[str]=5 , UpperCamelCase__ : Tuple=4 , UpperCamelCase__ : Tuple=37 , UpperCamelCase__ : str="gelu" , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Tuple=512 , UpperCamelCase__ : Optional[Any]=16 , UpperCamelCase__ : Any=2 , UpperCamelCase__ : int=0.02 , UpperCamelCase__ : Tuple=4 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = parent
SCREAMING_SNAKE_CASE : Any = batch_size
SCREAMING_SNAKE_CASE : Any = seq_length
SCREAMING_SNAKE_CASE : List[Any] = is_training
SCREAMING_SNAKE_CASE : str = use_attention_mask
SCREAMING_SNAKE_CASE : List[Any] = use_token_type_ids
SCREAMING_SNAKE_CASE : Dict = use_labels
SCREAMING_SNAKE_CASE : Optional[int] = vocab_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : List[str] = intermediate_size
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE : List[str] = type_vocab_size
SCREAMING_SNAKE_CASE : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Dict = initializer_range
SCREAMING_SNAKE_CASE : Union[str, Any] = num_choices
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : int = None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : Union[str, Any] = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=UpperCamelCase__ , )
return config, input_ids, attention_mask
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE : Tuple = config_and_inputs
SCREAMING_SNAKE_CASE : str = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
)
if is_flax_available()
else ()
)
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxDistilBertModelTester(self )
@slow
def __A ( self : Any ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE : str = model_class_name.from_pretrained('''distilbert-base-uncased''' )
SCREAMING_SNAKE_CASE : Dict = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCamelCase__ )
@require_flax
class lowercase__ ( unittest.TestCase):
@slow
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = FlaxDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
SCREAMING_SNAKE_CASE : List[Any] = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
SCREAMING_SNAKE_CASE : Any = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
SCREAMING_SNAKE_CASE : Tuple = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )[0]
SCREAMING_SNAKE_CASE : Tuple = (1, 11, 768)
self.assertEqual(output.shape , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , UpperCamelCase__ , atol=1E-4 ) )
| 718
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__UpperCamelCase : Tuple = {
'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'],
'tokenization_ctrl': ['CTRLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = [
'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'CTRLForSequenceClassification',
'CTRLLMHeadModel',
'CTRLModel',
'CTRLPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCTRLForSequenceClassification',
'TFCTRLLMHeadModel',
'TFCTRLModel',
'TFCTRLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 34
| 0
|
def A ( _lowercase = 1_000_000 ):
SCREAMING_SNAKE_CASE : Dict = set(range(3 , _lowercase , 2 ) )
primes.add(2 )
for p in range(3 , _lowercase , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , _lowercase , _lowercase ) ) )
SCREAMING_SNAKE_CASE : Optional[Any] = [float(_lowercase ) for n in range(limit + 1 )]
for p in primes:
for n in range(_lowercase , limit + 1 , _lowercase ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 719
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCamelCase : Tuple = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = ['MaskFormerFeatureExtractor']
__UpperCamelCase : List[Any] = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
__UpperCamelCase : Union[str, Any] = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 34
| 0
|
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : Dict = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all BART models at https://huggingface.co/models?filter=bart
__UpperCamelCase : Any = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
}
__UpperCamelCase : List[str] = {
'facebook/bart-base': 1024,
'facebook/bart-large': 1024,
'facebook/bart-large-mnli': 1024,
'facebook/bart-large-cnn': 1024,
'facebook/bart-large-xsum': 1024,
'yjernite/bart_eli5': 1024,
}
@lru_cache()
def A ( ):
SCREAMING_SNAKE_CASE : Dict = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
SCREAMING_SNAKE_CASE : Dict = bs[:]
SCREAMING_SNAKE_CASE : str = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_lowercase )
cs.append(2**8 + n )
n += 1
SCREAMING_SNAKE_CASE : str = [chr(_lowercase ) for n in cs]
return dict(zip(_lowercase , _lowercase ) )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Optional[Any] = set()
SCREAMING_SNAKE_CASE : Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE : List[Any] = char
return pairs
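# A tiny sketch of what the pair-extraction helper above computes (the class
# below calls it as ``get_pairs``); the sample word is illustrative:
#
#   word = ("h", "e", "l", "l", "o")
#   pairs = {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}
#
# Each iteration of the BPE loop merges the pair with the lowest merge rank
# and recomputes the pairs until no ranked pair remains.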
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ["""input_ids""", """attention_mask"""]
def __init__( self : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any]="replace" , UpperCamelCase__ : Any="<s>" , UpperCamelCase__ : List[str]="</s>" , UpperCamelCase__ : Optional[Any]="</s>" , UpperCamelCase__ : Union[str, Any]="<s>" , UpperCamelCase__ : Dict="<unk>" , UpperCamelCase__ : Union[str, Any]="<pad>" , UpperCamelCase__ : List[str]="<mask>" , UpperCamelCase__ : int=False , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else bos_token
SCREAMING_SNAKE_CASE : int = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else eos_token
SCREAMING_SNAKE_CASE : Dict = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else sep_token
SCREAMING_SNAKE_CASE : int = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else cls_token
SCREAMING_SNAKE_CASE : List[Any] = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else unk_token
SCREAMING_SNAKE_CASE : int = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE : Union[str, Any] = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
super().__init__(
errors=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , **UpperCamelCase__ , )
with open(UpperCamelCase__ , encoding='''utf-8''' ) as vocab_handle:
SCREAMING_SNAKE_CASE : Any = json.load(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE : Any = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE : List[str] = bytes_to_unicode()
SCREAMING_SNAKE_CASE : Dict = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCamelCase__ , encoding='''utf-8''' ) as merges_handle:
SCREAMING_SNAKE_CASE : Optional[int] = merges_handle.read().split('''\n''' )[1:-1]
SCREAMING_SNAKE_CASE : Optional[int] = [tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE : Optional[Any] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
SCREAMING_SNAKE_CASE : Optional[Any] = {}
SCREAMING_SNAKE_CASE : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE : Dict = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
def __A ( self : Dict ):
'''simple docstring'''
return len(self.encoder )
def __A ( self : str ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def __A ( self : str , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE : str = tuple(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = get_pairs(UpperCamelCase__ )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE : Union[str, Any] = min(UpperCamelCase__ , key=lambda UpperCamelCase__ : self.bpe_ranks.get(UpperCamelCase__ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE : Dict = bigram
SCREAMING_SNAKE_CASE : str = []
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
while i < len(UpperCamelCase__ ):
try:
SCREAMING_SNAKE_CASE : Union[str, Any] = word.index(UpperCamelCase__ , UpperCamelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE : int = j
if word[i] == first and i < len(UpperCamelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE : Any = tuple(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = new_word
if len(UpperCamelCase__ ) == 1:
break
else:
SCREAMING_SNAKE_CASE : List[Any] = get_pairs(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = ''' '''.join(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = word
return word
def __A ( self : Tuple , UpperCamelCase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = []
for token in re.findall(self.pat , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[str] = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase__ ).split(''' ''' ) )
return bpe_tokens
def __A ( self : List[Any] , UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
return self.encoder.get(UpperCamelCase__ , self.encoder.get(self.unk_token ) )
def __A ( self : Dict , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return self.decoder.get(UpperCamelCase__ )
def __A ( self : str , UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = ''''''.join(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def __A ( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE : Any = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE : Dict = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase__ , ensure_ascii=UpperCamelCase__ ) + '''\n''' )
SCREAMING_SNAKE_CASE : List[Any] = 0
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase__ : UpperCamelCase__[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
SCREAMING_SNAKE_CASE : Optional[Any] = token_index
writer.write(''' '''.join(UpperCamelCase__ ) + '''\n''' )
index += 1
return vocab_file, merge_file
def __A ( self : Any , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.cls_token_id]
SCREAMING_SNAKE_CASE : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __A ( self : str , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase__ )) + [1]
return [1] + ([0] * len(UpperCamelCase__ )) + [1, 1] + ([0] * len(UpperCamelCase__ )) + [1]
def __A ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = [self.sep_token_id]
SCREAMING_SNAKE_CASE : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __A ( self : str , UpperCamelCase__ : Any , UpperCamelCase__ : int=False , **UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase__ ) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE : Union[str, Any] = ''' ''' + text
return (text, kwargs)
| 720
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
__UpperCamelCase : Dict = [
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def A ( _lowercase , _lowercase=None , _lowercase=None , _lowercase=None ):
SCREAMING_SNAKE_CASE : Union[str, Any] = True
while ask_again:
SCREAMING_SNAKE_CASE : Optional[Any] = input(_lowercase )
try:
if default is not None and len(_lowercase ) == 0:
return default
return convert_value(_lowercase ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(_lowercase )
def A ( _lowercase , _lowercase=[] , _lowercase=None , _lowercase=0 ):
SCREAMING_SNAKE_CASE : Dict = BulletMenu(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE : str = menu.run(default_choice=_lowercase )
return convert_value(_lowercase ) if convert_value is not None else result
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Dict = int(_lowercase )
return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Any = int(_lowercase )
return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = int(_lowercase )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = int(_lowercase )
return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Dict = int(_lowercase )
return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] )
def A ( _lowercase ):
return {"yes": True, "no": False}[value.lower()]
class lowercase__ ( argparse.RawDescriptionHelpFormatter):
def __A ( self : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = super()._format_usage(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = usage.replace('''<command> [<args>] ''' , '''''' )
return usage
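# A minimal sketch of the ask-until-valid pattern implemented above, assuming
# interactive stdin; the prompt text and converter below are illustrative:
#
#   value = None
#   while value is None:
#       raw = input("Use FP16? [yes/NO]: ") or "no"
#       try:
#           value = {"yes": True, "no": False}[raw.lower()]
#       except KeyError:
#           print("Please answer yes or no.")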
| 34
| 0
|
from __future__ import annotations
__UpperCamelCase : Union[str, Any] = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ):
SCREAMING_SNAKE_CASE : Any = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_lowercase ) )
] # the reference grid
SCREAMING_SNAKE_CASE : int = 1
SCREAMING_SNAKE_CASE : int = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_lowercase ) )
] # the action grid
SCREAMING_SNAKE_CASE : Any = init[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = init[1]
SCREAMING_SNAKE_CASE : List[Any] = 0
SCREAMING_SNAKE_CASE : Optional[Any] = g + heuristic[x][y] # cost from starting cell to destination cell
SCREAMING_SNAKE_CASE : Any = [[f, g, x, y]]
SCREAMING_SNAKE_CASE : List[Any] = False # flag that is set when search is complete
    SCREAMING_SNAKE_CASE : Tuple = False  # flag set if no cell is left to expand
    while not found and not resign:
        if len(_lowercase ) == 0:
            raise ValueError('''Algorithm is unable to find a solution''' )
        else:  # pop the least costly cell so as to move closer to the goal
cell.sort()
cell.reverse()
SCREAMING_SNAKE_CASE : List[str] = cell.pop()
SCREAMING_SNAKE_CASE : Tuple = next_cell[2]
SCREAMING_SNAKE_CASE : Dict = next_cell[3]
SCREAMING_SNAKE_CASE : List[Any] = next_cell[1]
if x == goal[0] and y == goal[1]:
SCREAMING_SNAKE_CASE : Tuple = True
else:
for i in range(len(_lowercase ) ): # to try out different valid actions
SCREAMING_SNAKE_CASE : Union[str, Any] = x + DIRECTIONS[i][0]
SCREAMING_SNAKE_CASE : Tuple = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(_lowercase ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
SCREAMING_SNAKE_CASE : Union[str, Any] = g + cost
SCREAMING_SNAKE_CASE : Any = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
SCREAMING_SNAKE_CASE : Any = 1
SCREAMING_SNAKE_CASE : Optional[int] = i
SCREAMING_SNAKE_CASE : str = []
SCREAMING_SNAKE_CASE : List[str] = goal[0]
SCREAMING_SNAKE_CASE : Optional[int] = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
SCREAMING_SNAKE_CASE : Dict = x - DIRECTIONS[action[x][y]][0]
SCREAMING_SNAKE_CASE : List[str] = y - DIRECTIONS[action[x][y]][1]
SCREAMING_SNAKE_CASE : Optional[Any] = xa
SCREAMING_SNAKE_CASE : Dict = ya
invpath.append([x, y] )
SCREAMING_SNAKE_CASE : List[str] = []
for i in range(len(_lowercase ) ):
path.append(invpath[len(_lowercase ) - 1 - i] )
return path, action
if __name__ == "__main__":
__UpperCamelCase : List[str] = [
[0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0s are free cells whereas 1s are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
__UpperCamelCase : Union[str, Any] = [0, 0]
# all coordinates are given in format [y,x]
__UpperCamelCase : Optional[Any] = [len(grid) - 1, len(grid[0]) - 1]
__UpperCamelCase : Union[str, Any] = 1
# the cost map which pushes the path closer to the goal
__UpperCamelCase : List[Any] = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
__UpperCamelCase : Union[str, Any] = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
__UpperCamelCase : str = 99
__UpperCamelCase : List[Any] = search(grid, init, goal, cost, heuristic)
print('ACTION MAP')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
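# Note on the search above: cells are expanded in order of f = g + heuristic,
# i.e. cost-so-far plus a Manhattan-distance estimate of the remaining cost.
# For this 5x6 grid the start (0, 0) gets f = 0 + (4 + 5) = 9, and every
# expansion adds cost = 1 to g, so the heuristic steers ties toward the goal.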
| 721
|
from __future__ import annotations
from typing import Any
class lowercase__ ( UpperCamelCase_):
pass
class lowercase__ :
def __init__( self : Union[str, Any] , UpperCamelCase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = data
SCREAMING_SNAKE_CASE : Node | None = None
def __iter__( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self
SCREAMING_SNAKE_CASE : Tuple = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(UpperCamelCase__ )
yield node.data
SCREAMING_SNAKE_CASE : Dict = node.next_node
@property
def __A ( self : Optional[int] ):
'''simple docstring'''
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
__UpperCamelCase : List[Any] = Node(1)
__UpperCamelCase : str = Node(2)
__UpperCamelCase : Dict = Node(3)
__UpperCamelCase : List[Any] = Node(4)
print(root_node.has_loop) # False
__UpperCamelCase : int = root_node.next_node
print(root_node.has_loop) # True
__UpperCamelCase : Union[str, Any] = Node(5)
__UpperCamelCase : Union[str, Any] = Node(6)
__UpperCamelCase : List[Any] = Node(5)
__UpperCamelCase : List[str] = Node(6)
print(root_node.has_loop) # False
__UpperCamelCase : List[Any] = Node(1)
print(root_node.has_loop) # False
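# An alternative constant-memory sketch of the same loop check, using Floyd's
# tortoise-and-hare instead of the visited list kept by the iterator above:
def _has_loop_floyd(head):
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node  # advance one step
        fast = fast.next_node.next_node  # advance two steps
        if slow is fast:  # the pointers can only meet inside a cycle
            return True
    return False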
| 34
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """openai/whisper-base"""
UpperCamelCase_ = (
"""This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
"""transcribed text."""
)
UpperCamelCase_ = """transcriber"""
UpperCamelCase_ = WhisperProcessor
UpperCamelCase_ = WhisperForConditionalGeneration
UpperCamelCase_ = ["""audio"""]
UpperCamelCase_ = ["""text"""]
def __A ( self : Optional[Any] , UpperCamelCase__ : Any ):
'''simple docstring'''
return self.pre_processor(UpperCamelCase__ , return_tensors='''pt''' ).input_features
def __A ( self : Any , UpperCamelCase__ : int ):
'''simple docstring'''
return self.model.generate(inputs=UpperCamelCase__ )
def __A ( self : Any , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
return self.pre_processor.batch_decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )[0]
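# A hedged usage sketch of the underlying Whisper classes (not the tool
# wrapper above); the checkpoint name matches the one declared above, and the
# silent one-second 16 kHz waveform is purely illustrative:
#
#   import numpy as np
#   from transformers import WhisperForConditionalGeneration, WhisperProcessor
#
#   processor = WhisperProcessor.from_pretrained("openai/whisper-base")
#   model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-base")
#   features = processor(
#       np.zeros(16000, dtype=np.float32), sampling_rate=16000, return_tensors="pt"
#   ).input_features
#   ids = model.generate(inputs=features)
#   text = processor.batch_decode(ids, skip_special_tokens=True)[0]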
| 700
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = ["""input_features""", """is_longer"""]
def __init__( self : Optional[Any] , UpperCamelCase__ : Dict=64 , UpperCamelCase__ : Optional[Any]=4_8000 , UpperCamelCase__ : Tuple=480 , UpperCamelCase__ : Union[str, Any]=10 , UpperCamelCase__ : List[Any]=1024 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : int=False , UpperCamelCase__ : float = 0 , UpperCamelCase__ : float = 1_4000 , UpperCamelCase__ : int = None , UpperCamelCase__ : str = "fusion" , UpperCamelCase__ : str = "repeatpad" , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
feature_size=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , padding_value=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = top_db
SCREAMING_SNAKE_CASE : Union[str, Any] = truncation
SCREAMING_SNAKE_CASE : str = padding
SCREAMING_SNAKE_CASE : List[Any] = fft_window_size
SCREAMING_SNAKE_CASE : Tuple = (fft_window_size >> 1) + 1
SCREAMING_SNAKE_CASE : List[str] = hop_length
SCREAMING_SNAKE_CASE : List[Any] = max_length_s
SCREAMING_SNAKE_CASE : Tuple = max_length_s * sampling_rate
SCREAMING_SNAKE_CASE : List[Any] = sampling_rate
SCREAMING_SNAKE_CASE : List[str] = frequency_min
SCREAMING_SNAKE_CASE : Any = frequency_max
SCREAMING_SNAKE_CASE : List[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase__ , min_frequency=UpperCamelCase__ , max_frequency=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , norm=UpperCamelCase__ , mel_scale='''htk''' , )
SCREAMING_SNAKE_CASE : Optional[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase__ , min_frequency=UpperCamelCase__ , max_frequency=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , norm='''slaney''' , mel_scale='''slaney''' , )
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __A ( self : Optional[int] , UpperCamelCase__ : np.array , UpperCamelCase__ : Optional[np.array] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = spectrogram(
UpperCamelCase__ , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase__ , log_mel='''dB''' , )
return log_mel_spectrogram.T
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE : int = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE : Any = [0]
# randomly choose index for each part
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.choice(ranges[0] )
SCREAMING_SNAKE_CASE : List[Any] = np.random.choice(ranges[1] )
SCREAMING_SNAKE_CASE : int = np.random.choice(ranges[2] )
SCREAMING_SNAKE_CASE : Optional[int] = mel[idx_front : idx_front + chunk_frames, :]
SCREAMING_SNAKE_CASE : Optional[Any] = mel[idx_middle : idx_middle + chunk_frames, :]
SCREAMING_SNAKE_CASE : Tuple = mel[idx_back : idx_back + chunk_frames, :]
SCREAMING_SNAKE_CASE : str = torch.tensor(mel[None, None, :] )
SCREAMING_SNAKE_CASE : Optional[int] = torch.nn.functional.interpolate(
UpperCamelCase__ , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = mel_shrink[0][0].numpy()
SCREAMING_SNAKE_CASE : Union[str, Any] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def __A ( self : Dict , UpperCamelCase__ : np.array , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
SCREAMING_SNAKE_CASE : Optional[Any] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ ) - max_length
SCREAMING_SNAKE_CASE : Dict = np.random.randint(0 , overflow + 1 )
SCREAMING_SNAKE_CASE : Optional[Any] = waveform[idx : idx + max_length]
SCREAMING_SNAKE_CASE : Any = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
SCREAMING_SNAKE_CASE : Any = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters )
SCREAMING_SNAKE_CASE : List[str] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
SCREAMING_SNAKE_CASE : List[Any] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
SCREAMING_SNAKE_CASE : Optional[Any] = np.stack([mel, mel, mel, mel] , axis=0 )
SCREAMING_SNAKE_CASE : Tuple = False
else:
SCREAMING_SNAKE_CASE : str = self._random_mel_fusion(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = True
else:
raise NotImplementedError(f"""data_truncating {truncation} not implemented""" )
else:
SCREAMING_SNAKE_CASE : List[str] = False
        # Only "repeat" is a new possible value for padding: the audio is repeated before the usual max_length padding is applied
if waveform.shape[0] < max_length:
if padding == "repeat":
SCREAMING_SNAKE_CASE : Tuple = int(max_length / len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Any = np.stack(np.tile(UpperCamelCase__ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
SCREAMING_SNAKE_CASE : List[Any] = int(max_length / len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Dict = np.stack(np.tile(UpperCamelCase__ , UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Dict = np.pad(UpperCamelCase__ , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
if truncation == "fusion":
SCREAMING_SNAKE_CASE : List[Any] = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
SCREAMING_SNAKE_CASE : List[str] = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any] , UpperCamelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase__ : str = None , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , **UpperCamelCase__ : Any , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = truncation if truncation is not None else self.truncation
SCREAMING_SNAKE_CASE : List[str] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
SCREAMING_SNAKE_CASE : List[str] = isinstance(UpperCamelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
SCREAMING_SNAKE_CASE : int = is_batched_numpy or (
isinstance(UpperCamelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE : Any = [np.asarray(UpperCamelCase__ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCamelCase__ , np.ndarray ):
SCREAMING_SNAKE_CASE : List[Any] = np.asarray(UpperCamelCase__ , dtype=np.floataa )
elif isinstance(UpperCamelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE : Union[str, Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE : List[str] = [np.asarray(UpperCamelCase__ )]
# convert to mel spectrogram, truncate and pad if needed.
SCREAMING_SNAKE_CASE : int = [
self._get_input_mel(UpperCamelCase__ , max_length if max_length else self.nb_max_samples , UpperCamelCase__ , UpperCamelCase__ )
for waveform in raw_speech
]
SCREAMING_SNAKE_CASE : str = []
SCREAMING_SNAKE_CASE : List[str] = []
for mel, longer in padded_inputs:
input_mel.append(UpperCamelCase__ )
is_longer.append(UpperCamelCase__ )
if truncation == "fusion" and sum(UpperCamelCase__ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randint(0 , len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Optional[Any] = True
if isinstance(input_mel[0] , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(UpperCamelCase__ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
SCREAMING_SNAKE_CASE : Optional[Any] = [[longer] for longer in is_longer]
SCREAMING_SNAKE_CASE : Union[str, Any] = {'''input_features''': input_mel, '''is_longer''': is_longer}
SCREAMING_SNAKE_CASE : int = BatchFeature(UpperCamelCase__ )
if return_tensors is not None:
SCREAMING_SNAKE_CASE : int = input_features.convert_to_tensors(UpperCamelCase__ )
return input_features
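# A minimal numpy sketch of the "repeatpad" branch above: the waveform is
# tiled to fill as much of max_length as possible and then zero-padded; the
# toy lengths below are illustrative:
#
#   import numpy as np
#   waveform, max_length = np.arange(3, dtype=np.float32), 8
#   n_repeat = int(max_length / len(waveform))  # -> 2
#   tiled = np.tile(waveform, n_repeat)  # [0, 1, 2, 0, 1, 2]
#   padded = np.pad(
#       tiled, (0, max_length - tiled.shape[0]), mode="constant", constant_values=0
#   )  # [0, 1, 2, 0, 1, 2, 0, 0]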
| 34
| 0
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def A ( ):
SCREAMING_SNAKE_CASE : Union[str, Any] = ArgumentParser(
description=(
'''PyTorch TPU distributed training launch '''
'''helper utility that will spawn up '''
'''multiple distributed processes'''
) )
# Optional arguments for the launch helper
parser.add_argument('''--num_cores''' , type=_lowercase , default=1 , help='''Number of TPU cores to use (1 or 8).''' )
# positional
parser.add_argument(
'''training_script''' , type=_lowercase , help=(
'''The full path to the single TPU training '''
'''program/script to be launched in parallel, '''
'''followed by all the arguments for the '''
'''training script'''
) , )
# rest from the training program
parser.add_argument('''training_script_args''' , nargs=_lowercase )
return parser.parse_args()
def A ( ):
SCREAMING_SNAKE_CASE : List[Any] = parse_args()
# Import training_script as a module.
SCREAMING_SNAKE_CASE : Optional[int] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
SCREAMING_SNAKE_CASE : Tuple = script_fpath.stem
SCREAMING_SNAKE_CASE : List[Any] = importlib.import_module(_lowercase )
# Patch sys.argv
SCREAMING_SNAKE_CASE : str = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
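# Illustrative invocation of this launcher (a sketch: the launcher filename,
# the training script and its flags below are hypothetical):
#
#   python xla_spawn.py --num_cores 8 \
#       run_glue.py --model_name_or_path bert-base-cased --do_train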
| 701
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : str = {
'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json',
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """layoutlmv3"""
def __init__( self : Optional[int] , UpperCamelCase__ : Union[str, Any]=5_0265 , UpperCamelCase__ : List[Any]=768 , UpperCamelCase__ : Union[str, Any]=12 , UpperCamelCase__ : Union[str, Any]=12 , UpperCamelCase__ : Tuple=3072 , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Any=512 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : List[Any]=0.02 , UpperCamelCase__ : Optional[Any]=1E-5 , UpperCamelCase__ : List[str]=1 , UpperCamelCase__ : int=0 , UpperCamelCase__ : str=2 , UpperCamelCase__ : List[str]=1024 , UpperCamelCase__ : str=128 , UpperCamelCase__ : str=128 , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Optional[int]=32 , UpperCamelCase__ : Any=128 , UpperCamelCase__ : Optional[Any]=64 , UpperCamelCase__ : Dict=256 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Dict=224 , UpperCamelCase__ : Optional[Any]=3 , UpperCamelCase__ : Any=16 , UpperCamelCase__ : Any=None , **UpperCamelCase__ : Optional[Any] , ):
'''simple docstring'''
super().__init__(
vocab_size=UpperCamelCase__ , hidden_size=UpperCamelCase__ , num_hidden_layers=UpperCamelCase__ , num_attention_heads=UpperCamelCase__ , intermediate_size=UpperCamelCase__ , hidden_act=UpperCamelCase__ , hidden_dropout_prob=UpperCamelCase__ , attention_probs_dropout_prob=UpperCamelCase__ , max_position_embeddings=UpperCamelCase__ , type_vocab_size=UpperCamelCase__ , initializer_range=UpperCamelCase__ , layer_norm_eps=UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = max_ad_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = coordinate_size
SCREAMING_SNAKE_CASE : List[str] = shape_size
SCREAMING_SNAKE_CASE : Optional[int] = has_relative_attention_bias
SCREAMING_SNAKE_CASE : List[Any] = rel_pos_bins
SCREAMING_SNAKE_CASE : str = max_rel_pos
SCREAMING_SNAKE_CASE : Any = has_spatial_attention_bias
SCREAMING_SNAKE_CASE : Union[str, Any] = rel_ad_pos_bins
SCREAMING_SNAKE_CASE : Union[str, Any] = max_rel_ad_pos
SCREAMING_SNAKE_CASE : Union[str, Any] = text_embed
SCREAMING_SNAKE_CASE : List[str] = visual_embed
SCREAMING_SNAKE_CASE : Optional[Any] = input_size
SCREAMING_SNAKE_CASE : Optional[Any] = num_channels
SCREAMING_SNAKE_CASE : List[Any] = patch_size
SCREAMING_SNAKE_CASE : List[Any] = classifier_dropout
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = version.parse("""1.12""")
@property
def __A ( self : str ):
'''simple docstring'''
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
def __A ( self : int ):
'''simple docstring'''
return 1E-5
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return 12
def __A ( self : Optional[Any] , UpperCamelCase__ : "ProcessorMixin" , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional["TensorType"] = None , UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : int = 40 , ):
'''simple docstring'''
setattr(processor.image_processor , '''apply_ocr''' , UpperCamelCase__ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : Any = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : List[Any] = processor.tokenizer.num_special_tokens_to_add(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE : Union[str, Any] = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
SCREAMING_SNAKE_CASE : Any = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_images(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = dict(
processor(
UpperCamelCase__ , text=UpperCamelCase__ , boxes=UpperCamelCase__ , return_tensors=UpperCamelCase__ , ) )
return inputs
| 34
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCamelCase : Tuple = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Tuple = [
'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OPTForCausalLM',
'OPTModel',
'OPTPreTrainedModel',
'OPTForSequenceClassification',
'OPTForQuestionAnswering',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : str = [
'FlaxOPTForCausalLM',
'FlaxOPTModel',
'FlaxOPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
__UpperCamelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 702
|
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = FunnelTokenizer
UpperCamelCase_ = FunnelTokenizerFast
UpperCamelCase_ = True
UpperCamelCase_ = True
def __A ( self : Union[str, Any] ):
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE : Optional[Any] = [
'''<unk>''',
'''<cls>''',
'''<sep>''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __A ( self : int , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return FunnelTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self : int , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self : Any , UpperCamelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = '''UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE : int = '''unwanted, running'''
return input_text, output_text
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE : int = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(UpperCamelCase__ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [7, 4, 5, 10, 8, 9] )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_tokenizers(do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
SCREAMING_SNAKE_CASE : int = tokenizer('''UNwant\u00E9d,running''' )
SCREAMING_SNAKE_CASE : Optional[Any] = len(inputs['''input_ids'''] ) - 1
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer('''UNwant\u00E9d,running''' , '''UNwant\u00E9d,running''' )
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len + [1] * sentence_len )
| 34
| 0
|
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class lowercase__ :
def __init__( self : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Any=13 , UpperCamelCase__ : Any=7 , UpperCamelCase__ : Tuple=6 , UpperCamelCase__ : int=17 , UpperCamelCase__ : List[str]=23 , UpperCamelCase__ : Optional[Any]=11 , UpperCamelCase__ : List[Any]=True , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = parent
SCREAMING_SNAKE_CASE : Optional[int] = batch_size
SCREAMING_SNAKE_CASE : Any = seq_length
SCREAMING_SNAKE_CASE : int = act_dim
SCREAMING_SNAKE_CASE : Any = state_dim
SCREAMING_SNAKE_CASE : Dict = hidden_size
SCREAMING_SNAKE_CASE : Optional[Any] = max_length
SCREAMING_SNAKE_CASE : List[Any] = is_training
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
SCREAMING_SNAKE_CASE : Tuple = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
SCREAMING_SNAKE_CASE : Dict = floats_tensor((self.batch_size, self.seq_length, 1) )
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((self.batch_size, self.seq_length, 1) )
SCREAMING_SNAKE_CASE : str = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
SCREAMING_SNAKE_CASE : List[str] = random_attention_mask((self.batch_size, self.seq_length) )
SCREAMING_SNAKE_CASE : List[str] = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def __A ( self : Dict ):
'''simple docstring'''
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def __A ( self : str , UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = DecisionTransformerModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # sequence length * 3, as there are 3 modalities: states, returns and actions
def __A ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs()
(
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
) = config_and_inputs
SCREAMING_SNAKE_CASE : Optional[Any] = {
'''states''': states,
'''actions''': actions,
'''rewards''': rewards,
'''returns_to_go''': returns_to_go,
'''timesteps''': timesteps,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_torch
class lowercase__ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = (DecisionTransformerModel,) if is_torch_available() else ()
UpperCamelCase_ = ()
UpperCamelCase_ = {"""feature-extraction""": DecisionTransformerModel} if is_torch_available() else {}
# Ignoring a failing test from GenerationTesterMixin, as the model does not use input_ids
UpperCamelCase_ = False
# Ignoring failing tests from ModelTesterMixin, as the model does not implement these features
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = DecisionTransformerModelTester(self )
SCREAMING_SNAKE_CASE : Any = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def __A ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __A ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
@slow
def __A ( self : Optional[Any] ):
'''simple docstring'''
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : List[str] = DecisionTransformerModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : int = model_class(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : str = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : str = [
'''states''',
'''actions''',
'''rewards''',
'''returns_to_go''',
'''timesteps''',
'''attention_mask''',
]
self.assertListEqual(arg_names[: len(UpperCamelCase__ )] , UpperCamelCase__ )
@require_torch
class lowercase__ ( unittest.TestCase):
@slow
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = 2 # number of steps of autoregressive prediction we will perform
SCREAMING_SNAKE_CASE : Tuple = 10 # defined by the RL environment, may be normalized
SCREAMING_SNAKE_CASE : Optional[int] = DecisionTransformerModel.from_pretrained('''edbeeching/decision-transformer-gym-hopper-expert''' )
SCREAMING_SNAKE_CASE : Any = model.to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = model.config
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = torch.randn(1 , 1 , config.state_dim ).to(device=UpperCamelCase__ , dtype=torch.floataa ) # env.reset()
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(
[[0.24_2793, -0.2869_3074, 0.874_2613], [0.6781_5274, -0.0810_1085, -0.1295_2147]] , device=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(UpperCamelCase__ , device=UpperCamelCase__ , dtype=torch.floataa ).reshape(1 , 1 , 1 )
SCREAMING_SNAKE_CASE : Optional[int] = state
SCREAMING_SNAKE_CASE : Any = torch.zeros(1 , 0 , config.act_dim , device=UpperCamelCase__ , dtype=torch.floataa )
SCREAMING_SNAKE_CASE : int = torch.zeros(1 , 0 , device=UpperCamelCase__ , dtype=torch.floataa )
SCREAMING_SNAKE_CASE : Any = torch.tensor(0 , device=UpperCamelCase__ , dtype=torch.long ).reshape(1 , 1 )
for step in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : str = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=UpperCamelCase__ )] , dim=1 )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat([rewards, torch.zeros(1 , 1 , device=UpperCamelCase__ )] , dim=1 )
SCREAMING_SNAKE_CASE : Tuple = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
state_pred, action_pred, return_pred = model(
states=UpperCamelCase__ , actions=UpperCamelCase__ , rewards=UpperCamelCase__ , returns_to_go=UpperCamelCase__ , timesteps=UpperCamelCase__ , attention_mask=UpperCamelCase__ , return_dict=UpperCamelCase__ , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1E-4 ) )
SCREAMING_SNAKE_CASE : str = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=UpperCamelCase__ , dtype=torch.floataa ),
1.0,
False,
{},
)
SCREAMING_SNAKE_CASE : Optional[Any] = action_pred[0, -1]
SCREAMING_SNAKE_CASE : Tuple = torch.cat([states, state] , dim=1 )
SCREAMING_SNAKE_CASE : Tuple = returns_to_go[0, -1] - reward
SCREAMING_SNAKE_CASE : List[Any] = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
SCREAMING_SNAKE_CASE : List[str] = torch.cat(
[timesteps, torch.ones((1, 1) , device=UpperCamelCase__ , dtype=torch.long ) * (step + 1)] , dim=1 )
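# Outside the test harness, the same pattern drives action selection against a live
# environment. A hedged sketch (the helper name and environment interface are
# assumptions; `model` is a DecisionTransformerModel as exercised above):
def select_action(model , states , actions , rewards , returns_to_go , timesteps ):
    # Pad an empty action/reward slot for the step we are about to predict.
    actions = torch.cat([actions, torch.zeros(1 , 1 , model.config.act_dim )] , dim=1 )
    rewards = torch.cat([rewards, torch.zeros(1 , 1 )] , dim=1 )
    attention_mask = torch.ones(1 , states.shape[1] , dtype=torch.long )
    with torch.no_grad():
        outputs = model(
            states=states , actions=actions , rewards=rewards , returns_to_go=returns_to_go , timesteps=timesteps , attention_mask=attention_mask , return_dict=True , )
    return outputs.action_preds[0, -1]  # the most recent predicted action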
| 703
|
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class lowercase__ ( UpperCamelCase_):
def __init__( self : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = dataset
SCREAMING_SNAKE_CASE : Optional[Any] = process
SCREAMING_SNAKE_CASE : Union[str, Any] = params
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : List[str] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.dataset[i]
SCREAMING_SNAKE_CASE : Optional[int] = self.process(UpperCamelCase__ , **self.params )
return processed
class lowercase__ ( UpperCamelCase_):
def __init__( self : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any]=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = loader
SCREAMING_SNAKE_CASE : List[Any] = infer
SCREAMING_SNAKE_CASE : int = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : List[str] = loader_batch_size
# Internal bookkeeping
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : int = None
def __len__( self : int ):
'''simple docstring'''
return len(self.loader )
def __iter__( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = iter(self.loader )
return self
def __A ( self : List[str] ):
'''simple docstring'''
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is a simple tensor, just fetch the slice
SCREAMING_SNAKE_CASE : Optional[Any] = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
for k, element in self._loader_batch_data.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
# Convert ModelOutput to tuple first
SCREAMING_SNAKE_CASE : Dict = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE : Dict = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(UpperCamelCase__ , UpperCamelCase__ ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
SCREAMING_SNAKE_CASE : List[str] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
SCREAMING_SNAKE_CASE : int = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
# Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
SCREAMING_SNAKE_CASE : Union[str, Any] = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
# Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
SCREAMING_SNAKE_CASE : Tuple = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
SCREAMING_SNAKE_CASE : Tuple = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
SCREAMING_SNAKE_CASE : Any = self._loader_batch_data.__class__(UpperCamelCase__ )
self._loader_batch_index += 1
return result
def __A ( self : Union[str, Any] ):
'''simple docstring'''
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
SCREAMING_SNAKE_CASE : Tuple = next(self.iterator )
SCREAMING_SNAKE_CASE : List[Any] = self.infer(UpperCamelCase__ , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(UpperCamelCase__ , torch.Tensor ):
SCREAMING_SNAKE_CASE : Optional[int] = processed
else:
SCREAMING_SNAKE_CASE : int = list(processed.keys() )[0]
SCREAMING_SNAKE_CASE : int = processed[key]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : Dict = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
SCREAMING_SNAKE_CASE : List[Any] = observed_batch_size
# Setting internal index to unwrap the batch
SCREAMING_SNAKE_CASE : List[Any] = processed
SCREAMING_SNAKE_CASE : int = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class lowercase__ ( UpperCamelCase_):
def __init__( self : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any]=None ):
'''simple docstring'''
super().__init__(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __iter__( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = iter(self.loader )
SCREAMING_SNAKE_CASE : List[Any] = None
return self
def __A ( self : List[str] ):
'''simple docstring'''
if self.subiterator is None:
SCREAMING_SNAKE_CASE : Dict = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
SCREAMING_SNAKE_CASE : Any = next(self.subiterator )
except StopIteration:
# When a preprocess iterator ends, we can start looking at the next item.
# ChunkIterator will keep feeding until ALL elements of the iterator
# have created their subiterator and have been iterated against.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
SCREAMING_SNAKE_CASE : Optional[Any] = self.infer(next(self.iterator ) , **self.params )
SCREAMING_SNAKE_CASE : Union[str, Any] = next(self.subiterator )
return processed
class lowercase__ ( UpperCamelCase_):
def __iter__( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = iter(self.loader )
return self
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Optional[int] = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
SCREAMING_SNAKE_CASE : Tuple = self.loader_batch_item()
SCREAMING_SNAKE_CASE : Any = item.pop('''is_last''' )
accumulator.append(UpperCamelCase__ )
if is_last:
return accumulator
while not is_last:
SCREAMING_SNAKE_CASE : Any = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(UpperCamelCase__ , torch.Tensor ):
SCREAMING_SNAKE_CASE : Tuple = processed
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = list(processed.keys() )[0]
SCREAMING_SNAKE_CASE : List[str] = processed[key]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : int = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
SCREAMING_SNAKE_CASE : List[str] = observed_batch_size
SCREAMING_SNAKE_CASE : List[Any] = processed
SCREAMING_SNAKE_CASE : str = 0
while self._loader_batch_index < self.loader_batch_size:
SCREAMING_SNAKE_CASE : Any = self.loader_batch_item()
SCREAMING_SNAKE_CASE : List[Any] = item.pop('''is_last''' )
accumulator.append(UpperCamelCase__ )
if is_last:
return accumulator
else:
SCREAMING_SNAKE_CASE : int = processed
SCREAMING_SNAKE_CASE : List[str] = item.pop('''is_last''' )
accumulator.append(UpperCamelCase__ )
return accumulator
class lowercase__ ( UpperCamelCase_):
def __init__( self : Optional[Any] , UpperCamelCase__ : Dataset , UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = dataset
SCREAMING_SNAKE_CASE : Dict = key
def __len__( self : Optional[int] ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : Dict , UpperCamelCase__ : Tuple ):
'''simple docstring'''
return self.dataset[i][self.key]
class lowercase__ ( UpperCamelCase_):
def __init__( self : List[Any] , UpperCamelCase__ : Dataset , UpperCamelCase__ : str , UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = dataset
SCREAMING_SNAKE_CASE : List[str] = keya
SCREAMING_SNAKE_CASE : Tuple = keya
def __len__( self : List[str] ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : Union[str, Any] , UpperCamelCase__ : Any ):
'''simple docstring'''
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 34
| 0
|
from typing import Union
import fire
import torch
from tqdm import tqdm
def A ( _lowercase , _lowercase = "cpu" , _lowercase = None ):
SCREAMING_SNAKE_CASE : str = torch.load(_lowercase , map_location=_lowercase )
for k, v in tqdm(state_dict.items() ):
if not isinstance(_lowercase , torch.Tensor ):
raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''' )
SCREAMING_SNAKE_CASE : Any = v.half()
if save_path is None: # overwrite src_path
SCREAMING_SNAKE_CASE : str = src_path
torch.save(_lowercase , _lowercase )
if __name__ == "__main__":
fire.Fire(convert)
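# Since the entry point is wrapped with `fire.Fire`, the converter can also be
# called directly from Python. An illustrative invocation (paths are placeholders;
# omitting `save_path` overwrites the source checkpoint in place):
# convert('''checkpoints/pytorch_model.bin''' , map_location='''cpu''' , save_path='''checkpoints/pytorch_model.fp16.bin''' )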
| 704
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : Optional[Any] = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """deberta-v2"""
def __init__( self : Optional[Any] , UpperCamelCase__ : Any=12_8100 , UpperCamelCase__ : Optional[int]=1536 , UpperCamelCase__ : Dict=24 , UpperCamelCase__ : List[str]=24 , UpperCamelCase__ : Tuple=6144 , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Optional[int]=512 , UpperCamelCase__ : Optional[Any]=0 , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : List[Any]=1E-7 , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : str=-1 , UpperCamelCase__ : List[Any]=0 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Tuple=0 , UpperCamelCase__ : str="gelu" , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : int = num_hidden_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Any = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : Dict = type_vocab_size
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Optional[Any] = relative_attention
SCREAMING_SNAKE_CASE : Optional[Any] = max_relative_positions
SCREAMING_SNAKE_CASE : Optional[int] = pad_token_id
SCREAMING_SNAKE_CASE : Optional[Any] = position_biased_input
# Backwards compatibility
if isinstance(UpperCamelCase__ , str ):
SCREAMING_SNAKE_CASE : Optional[int] = [x.strip() for x in pos_att_type.lower().split('''|''' )]
SCREAMING_SNAKE_CASE : Any = pos_att_type
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE : Tuple = kwargs.get('''pooler_hidden_size''' , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = pooler_dropout
SCREAMING_SNAKE_CASE : Union[str, Any] = pooler_hidden_act
class lowercase__ ( UpperCamelCase_):
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Optional[int] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis)] )
else:
return OrderedDict([('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis)] )
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return 12
def __A ( self : Dict , UpperCamelCase__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional["TensorType"] = None , UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : "PreTrainedTokenizerBase" = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = super().generate_dummy_inputs(preprocessor=UpperCamelCase__ , framework=UpperCamelCase__ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
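# Put to use, this config plugs into the generic ONNX export entry point. A hedged
# sketch (checkpoint and output path are illustrative; `DebertaV2OnnxConfig` is a
# descriptive stand-in for the OnnxConfig subclass defined above):
from pathlib import Path
from transformers import AutoModel, AutoTokenizer
from transformers.onnx import export
tokenizer = AutoTokenizer.from_pretrained('''microsoft/deberta-v2-xlarge''' )
model = AutoModel.from_pretrained('''microsoft/deberta-v2-xlarge''' )
onnx_config = DebertaV2OnnxConfig(model.config )
export(tokenizer , model , onnx_config , opset=12 , output=Path('''deberta-v2.onnx''' ) )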
| 34
| 0
|
import inspect
import unittest
class lowercase__ ( unittest.TestCase):
def __A ( self : str ):
'''simple docstring'''
try:
import diffusers # noqa: F401
except ImportError:
assert False
def __A ( self : int ):
'''simple docstring'''
import diffusers
from diffusers.dependency_versions_table import deps
SCREAMING_SNAKE_CASE : List[str] = inspect.getmembers(UpperCamelCase__ , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
backend = '''k-diffusion'''
elif backend == "invisible_watermark":
backend = '''invisible-watermark'''
assert backend in deps, f"""{backend} is not in the deps table!"""
| 705
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : str = logging.get_logger(__name__)
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Any = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE : Any = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE : Any = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type='''dataset''' ) , '''r''' ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = {int(_lowercase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Dict = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : str = '''std_conv''' if '''bit''' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
SCREAMING_SNAKE_CASE : Optional[int] = BitConfig(
conv_layer=_lowercase , num_labels=1_000 , idalabel=_lowercase , labelaid=_lowercase , )
return config
def A ( _lowercase ):
if "stem.conv" in name:
SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
if "blocks" in name:
SCREAMING_SNAKE_CASE : Tuple = name.replace('''blocks''' , '''layers''' )
if "head.fc" in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''head.fc''' , '''classifier.1''' )
if name.startswith('''norm''' ):
SCREAMING_SNAKE_CASE : str = '''bit.''' + name
if "bit" not in name and "classifier" not in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = '''bit.encoder.''' + name
return name
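# A few hedged examples of what this mapping produces on representative timm BiT
# state_dict keys (the helper above is shown here under the descriptive name
# `rename_key`):
# rename_key('''stem.conv.weight''' )               -> '''bit.embedder.convolution.weight'''
# rename_key('''stages.0.blocks.0.conv1.weight''' ) -> '''bit.encoder.stages.0.layers.0.conv1.weight'''
# rename_key('''head.fc.weight''' )                 -> '''classifier.1.weight'''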
def A ( ):
SCREAMING_SNAKE_CASE : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE : List[str] = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return im
@torch.no_grad()
def A ( _lowercase , _lowercase , _lowercase=False ):
SCREAMING_SNAKE_CASE : List[Any] = get_config(_lowercase )
# load original model from timm
SCREAMING_SNAKE_CASE : Optional[Any] = create_model(_lowercase , pretrained=_lowercase )
timm_model.eval()
# load state_dict of original model
SCREAMING_SNAKE_CASE : Optional[int] = timm_model.state_dict()
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE : Dict = state_dict.pop(_lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = val.squeeze() if '''head''' in key else val
# load HuggingFace model
SCREAMING_SNAKE_CASE : str = BitForImageClassification(_lowercase )
model.eval()
model.load_state_dict(_lowercase )
# create image processor
SCREAMING_SNAKE_CASE : Optional[Any] = create_transform(**resolve_data_config({} , model=_lowercase ) )
SCREAMING_SNAKE_CASE : List[str] = transform.transforms
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
SCREAMING_SNAKE_CASE : Tuple = BitImageProcessor(
do_resize=_lowercase , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_lowercase , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=_lowercase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
SCREAMING_SNAKE_CASE : Any = prepare_img()
SCREAMING_SNAKE_CASE : Union[str, Any] = transform(_lowercase ).unsqueeze(0 )
SCREAMING_SNAKE_CASE : Optional[int] = processor(_lowercase , return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(_lowercase , _lowercase )
# verify logits
with torch.no_grad():
SCREAMING_SNAKE_CASE : Dict = model(_lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = outputs.logits
print('''Logits:''' , logits[0, :3] )
print('''Predicted class:''' , model.config.idalabel[logits.argmax(-1 ).item()] )
SCREAMING_SNAKE_CASE : List[Any] = timm_model(_lowercase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowercase , outputs.logits , atol=1e-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(_lowercase ).mkdir(exist_ok=_lowercase )
print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
processor.save_pretrained(_lowercase )
if push_to_hub:
print(f"""Pushing model {model_name} and processor to the hub""" )
model.push_to_hub(f"""ybelkada/{model_name}""" )
processor.push_to_hub(f"""ybelkada/{model_name}""" )
if __name__ == "__main__":
__UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
__UpperCamelCase : Optional[int] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 34
| 0
|
import operator
def strand_sort(arr , reverse = False , solution = None ):
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0 )]
    for i, item in enumerate(arr ):
        if _operator(item , sublist[-1] ):
            sublist.append(item )
            arr.pop(i )
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist )
    else:
        while sublist:
            item = sublist.pop(0 )
            for i, xx in enumerate(solution ):
                if not _operator(item , xx ):
                    solution.insert(i , item )
                    break
            else:
                solution.append(item )
    strand_sort(arr , reverse , solution )
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 706
|
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
__UpperCamelCase : str = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
__UpperCamelCase : int = logging.getLogger()
def A ( ):
SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
parser.add_argument('''-f''' )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
return args.f
def A ( _lowercase , _lowercase="eval" ):
SCREAMING_SNAKE_CASE : Dict = os.path.join(_lowercase , f"""{split}_results.json""" )
if os.path.exists(_lowercase ):
with open(_lowercase , '''r''' ) as f:
return json.load(_lowercase )
raise ValueError(f"""can't find {path}""" )
__UpperCamelCase : Optional[Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowercase__ ( UpperCamelCase_):
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Tuple = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_flax_glue.main()
SCREAMING_SNAKE_CASE : Union[str, Any] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : str = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_clm_flax.main()
SCREAMING_SNAKE_CASE : Dict = get_results(UpperCamelCase__ )
self.assertLess(result['''eval_perplexity'''] , 100 )
@slow
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Union[str, Any] = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_summarization_flax.main()
SCREAMING_SNAKE_CASE : Union[str, Any] = get_results(UpperCamelCase__ , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Dict = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_mlm_flax.main()
SCREAMING_SNAKE_CASE : List[Any] = get_results(UpperCamelCase__ )
self.assertLess(result['''eval_perplexity'''] , 42 )
@slow
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Union[str, Any] = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_ta_mlm_flax.main()
SCREAMING_SNAKE_CASE : Optional[int] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 7 if get_gpu_count() > 1 else 2
SCREAMING_SNAKE_CASE : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Any = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_flax_ner.main()
SCREAMING_SNAKE_CASE : List[str] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Union[str, Any] = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_qa.main()
SCREAMING_SNAKE_CASE : str = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_f1'''] , 30 )
self.assertGreaterEqual(result['''eval_exact'''] , 30 )
| 34
| 0
|
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self , sample : float ) -> float:
        '''Calculate y[n] for a single input sample.'''
        return 0.0


def get_bounds(fft_results : np.ndarray , samplerate : int ):
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
    return lowest, highest


def show_frequency_response(filter_type : FilterType , samplerate : int ) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs ) )
    fft_db = 20 * np.log10(fft_out )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel('''Frequency (Hz)''' )
    plt.xscale('''log''' )
    # Display within reasonable bounds
    bounds = get_bounds(fft_db , samplerate )
    plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
    plt.ylabel('''Gain (dB)''' )
    plt.plot(fft_db )
    plt.show()


def show_phase_response(filter_type : FilterType , samplerate : int ) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_phase = np.angle(np.fft.fft(outputs ) )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel('''Frequency (Hz)''' )
    plt.xscale('''log''' )
    plt.ylim(-2 * pi , 2 * pi )
    plt.ylabel('''Phase shift (Radians)''' )
    plt.plot(np.unwrap(fft_phase , -2 * pi ) )
    plt.show()
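# Any object with a `process(sample) -> float` method satisfies the protocol above.
# A minimal illustrative filter that can be fed to these plotting helpers (the
# smoothing coefficient is arbitrary):
class SimpleLowPass:
    '''One-pole low-pass: y[n] = a * x[n] + (1 - a) * y[n - 1] (illustrative).'''

    def __init__(self , a : float = 0.1 ) -> None:
        self.a = a
        self.prev = 0.0

    def process(self , sample : float ) -> float:
        self.prev = self.a * sample + (1 - self.a) * self.prev
        return self.prev

# e.g. show_frequency_response(SimpleLowPass() , 4_8000 )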
| 707
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
__UpperCamelCase : Dict = random.Random()
def A ( _lowercase , _lowercase=1.0 , _lowercase=None , _lowercase=None ):
if rng is None:
SCREAMING_SNAKE_CASE : Any = global_rng
SCREAMING_SNAKE_CASE : int = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class lowercase__ ( unittest.TestCase):
def __init__( self : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str=7 , UpperCamelCase__ : Any=400 , UpperCamelCase__ : List[str]=2000 , UpperCamelCase__ : List[Any]=2048 , UpperCamelCase__ : Any=128 , UpperCamelCase__ : Union[str, Any]=1 , UpperCamelCase__ : List[Any]=512 , UpperCamelCase__ : str=30 , UpperCamelCase__ : Tuple=4_4100 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = parent
SCREAMING_SNAKE_CASE : str = batch_size
SCREAMING_SNAKE_CASE : str = min_seq_length
SCREAMING_SNAKE_CASE : Dict = max_seq_length
SCREAMING_SNAKE_CASE : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE : Optional[Any] = spectrogram_length
SCREAMING_SNAKE_CASE : Optional[int] = feature_size
SCREAMING_SNAKE_CASE : Tuple = num_audio_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = hop_length
SCREAMING_SNAKE_CASE : List[Any] = chunk_length
SCREAMING_SNAKE_CASE : str = sampling_rate
def __A ( self : Optional[Any] ):
'''simple docstring'''
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def __A ( self : Tuple , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : Optional[int]=False ):
'''simple docstring'''
def _flatten(UpperCamelCase__ : str ):
return list(itertools.chain(*UpperCamelCase__ ) )
if equal_length:
SCREAMING_SNAKE_CASE : List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE : int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE : Optional[Any] = [np.asarray(UpperCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = TvltFeatureExtractor
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = TvltFeatureExtractionTester(self )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(UpperCamelCase__ , '''spectrogram_length''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''feature_size''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''num_audio_channels''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''hop_length''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''chunk_length''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''sampling_rate''' ) )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract_first.save_pretrained(UpperCamelCase__ )[0]
check_json_file_has_correct_format(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = self.feature_extraction_class.from_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE : str = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE : Optional[int] = dict_first.pop('''mel_filters''' )
SCREAMING_SNAKE_CASE : Optional[int] = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Tuple = os.path.join(UpperCamelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = self.feature_extraction_class.from_json_file(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE : str = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE : int = dict_first.pop('''mel_filters''' )
SCREAMING_SNAKE_CASE : Any = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE : Optional[int] = [np.asarray(UpperCamelCase__ ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
SCREAMING_SNAKE_CASE : Optional[Any] = feature_extractor(UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
SCREAMING_SNAKE_CASE : List[str] = feature_extractor(
UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 , mask_audio=UpperCamelCase__ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE : Optional[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE : Dict = np.asarray(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = feature_extractor(UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def __A ( self : Optional[int] , UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE : Dict = ds.sort('''id''' ).select(range(UpperCamelCase__ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE : int = TvltFeatureExtractor()
SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(UpperCamelCase__ , return_tensors='''pt''' ).audio_values
self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
SCREAMING_SNAKE_CASE : str = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , UpperCamelCase__ , atol=1E-4 ) )
| 34
| 0
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCamelCase_)
class lowercase__ ( UpperCamelCase_):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
UpperCamelCase_ = field(default="""summarization""" , metadata={"""include_in_asdict_even_if_is_default""": True})
UpperCamelCase_ = Features({"""text""": Value("""string""")})
UpperCamelCase_ = Features({"""summary""": Value("""string""")})
UpperCamelCase_ = """text"""
UpperCamelCase_ = """summary"""
@property
def __A ( self : Optional[int] ):
'''simple docstring'''
return {self.text_column: "text", self.summary_column: "summary"}
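# On the datasets side, a task template like this is typically consumed through
# `Dataset.prepare_for_task`, which casts and renames columns to the template's
# schema. A hedged usage sketch ('''my_corpus''' is a placeholder dataset id):
# from datasets import load_dataset
# ds = load_dataset('''my_corpus''' , split='''train''' )
# ds = ds.prepare_for_task('''summarization''' )  # columns become {"text", "summary"}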
| 708
|
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class lowercase__ ( UpperCamelCase_ , UpperCamelCase_):
UpperCamelCase_ = 1
@register_to_config
def __init__( self : List[str] , UpperCamelCase__ : int = 1000 , UpperCamelCase__ : Optional[Union[np.ndarray, List[float]]] = None ):
'''simple docstring'''
self.set_timesteps(UpperCamelCase__ )
# standard deviation of the initial noise distribution
SCREAMING_SNAKE_CASE : str = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
SCREAMING_SNAKE_CASE : Tuple = 4
# running values
SCREAMING_SNAKE_CASE : int = []
def __A ( self : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, torch.device] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = num_inference_steps
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
SCREAMING_SNAKE_CASE : Tuple = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
SCREAMING_SNAKE_CASE : int = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = torch.sin(steps * math.pi / 2 ) ** 2
SCREAMING_SNAKE_CASE : Dict = (1.0 - self.betas**2) ** 0.5
SCREAMING_SNAKE_CASE : Optional[Any] = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
SCREAMING_SNAKE_CASE : List[str] = timesteps.to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = []
def __A ( self : Tuple , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True , ):
'''simple docstring'''
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
SCREAMING_SNAKE_CASE : Optional[int] = (self.timesteps == timestep).nonzero().item()
SCREAMING_SNAKE_CASE : Union[str, Any] = timestep_index + 1
SCREAMING_SNAKE_CASE : int = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(UpperCamelCase__ )
if len(self.ets ) == 1:
SCREAMING_SNAKE_CASE : Dict = self.ets[-1]
elif len(self.ets ) == 2:
SCREAMING_SNAKE_CASE : Optional[int] = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
SCREAMING_SNAKE_CASE : str = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
SCREAMING_SNAKE_CASE : Optional[Any] = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
SCREAMING_SNAKE_CASE : Optional[int] = self._get_prev_sample(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase__ )
def __A ( self : Optional[Any] , UpperCamelCase__ : torch.FloatTensor , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
return sample
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.alphas[timestep_index]
SCREAMING_SNAKE_CASE : List[str] = self.betas[timestep_index]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.alphas[prev_timestep_index]
SCREAMING_SNAKE_CASE : Tuple = self.betas[prev_timestep_index]
SCREAMING_SNAKE_CASE : Dict = (sample - sigma * ets) / max(UpperCamelCase__ , 1E-8 )
SCREAMING_SNAKE_CASE : Optional[Any] = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self : int ):
'''simple docstring'''
return self.config.num_train_timesteps
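# A scheduler like this is consumed in a plain denoising loop through the standard
# diffusers scheduler surface (`set_timesteps` / `step`). A hedged sketch where
# `denoiser` stands in for a trained model and the sample shape is illustrative:
def run_denoising(scheduler , denoiser , num_inference_steps = 50 ):
    scheduler.set_timesteps(num_inference_steps )
    sample = torch.randn(1 , 3 , 64 , 64 )  # initial noise has std 1.0 for this scheduler
    for t in scheduler.timesteps:
        model_output = denoiser(sample , t )
        sample = scheduler.step(model_output , t , sample ).prev_sample
    return sample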
| 34
| 0
|
def fibonacci(n ):
    if n == 1 or not isinstance(n , int ):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2 , n + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )
        return sequence[n]


def fibonacci_digits_index(n ):
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index ) ) )
    return index


def solution(n = 1_000 ):
    return fibonacci_digits_index(n )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
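# The brute-force digit count can be cross-checked against the closed form from
# Binet's formula, F(i) ~ phi**i / sqrt(5): for n > 1, the first index whose term
# has n digits is ceil((n - 1 + log10(5) / 2) / log10(phi)). A hedged sketch:
import math

def fibonacci_digits_index_closed_form(n ):
    phi = (1 + math.sqrt(5 )) / 2
    return math.ceil((n - 1 + math.log10(5 ) / 2) / math.log10(phi ) )

assert fibonacci_digits_index_closed_form(3 ) == 12  # F(12) = 144, the first 3-digit term
assert fibonacci_digits_index_closed_form(1_000 ) == 4782  # known Project Euler 25 answer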
| 709
|
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowercase__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = IFPipeline
UpperCamelCase_ = TEXT_TO_IMAGE_PARAMS - {"""width""", """height""", """latents"""}
UpperCamelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase_ = PipelineTesterMixin.required_optional_params - {"""latents"""}
def __A ( self : Tuple ):
'''simple docstring'''
return self._get_dummy_components()
def __A ( self : int , UpperCamelCase__ : Dict , UpperCamelCase__ : int=0 ):
'''simple docstring'''
if str(UpperCamelCase__ ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : Dict = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __A ( self : List[str] ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def __A ( self : Any ):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __A ( self : List[Any] ):
'''simple docstring'''
self._test_save_load_local()
def __A ( self : List[str] ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __A ( self : Tuple ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase):
def __A ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : str = IFSuperResolutionPipeline.from_pretrained(
'''DeepFloyd/IF-II-L-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
        # pre-compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('''cuda''' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = pipe_a.encode_prompt('''anime turtle''' , device='''cuda''' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : str = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
SCREAMING_SNAKE_CASE : Optional[int] = IFImgaImgPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE : Optional[int] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
SCREAMING_SNAKE_CASE : Tuple = IFInpaintingPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE : Optional[int] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict ):
'''simple docstring'''
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , num_inference_steps=2 , generator=UpperCamelCase__ , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Optional[int] = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
SCREAMING_SNAKE_CASE : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Tuple = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str ):
'''simple docstring'''
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , num_inference_steps=2 , generator=UpperCamelCase__ , output_type='''np''' , )
SCREAMING_SNAKE_CASE : int = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE : Optional[int] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , original_image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Optional[int] = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : int = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , mask_image=UpperCamelCase__ , num_inference_steps=2 , generator=UpperCamelCase__ , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Any = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : str = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , mask_image=UpperCamelCase__ , original_image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Optional[Any] = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
def A ( ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
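# Hedged usage note (added): the helper above is invoked right before each pipeline
# call so that torch.cuda.max_memory_allocated() reports the peak of that call alone:
#
#     _start_torch_memory_measurement()
#     _ = pipe(**inputs)                      # hypothetical call under test
#     peak_bytes = torch.cuda.max_memory_allocated()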
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = WavaVecaPhonemeCTCTokenizer
UpperCamelCase_ = False
def __A ( self : List[Any] ):
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE : Optional[int] = (
'''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '''
'''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '''
'''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '''
'''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '''
'''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '''
'''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '''
'''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '''
'''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '''
'''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '''
'''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '''
'''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '''
'''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '''
'''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'''
).split(''' ''' )
SCREAMING_SNAKE_CASE : Tuple = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
SCREAMING_SNAKE_CASE : Dict = {'''pad_token''': '''<pad>''', '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>'''}
SCREAMING_SNAKE_CASE : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + '''\n''' )
def __A ( self : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : Dict=20 , UpperCamelCase__ : Optional[int]=5 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase__ )) for i in range(len(UpperCamelCase__ ) )]
SCREAMING_SNAKE_CASE : str = list(filter(lambda UpperCamelCase__ : [t[0]] == tokenizer.encode(t[1] , do_phonemize=UpperCamelCase__ ) , UpperCamelCase__ ) )
if max_length is not None and len(UpperCamelCase__ ) > max_length:
SCREAMING_SNAKE_CASE : int = toks[:max_length]
if min_length is not None and len(UpperCamelCase__ ) < min_length and len(UpperCamelCase__ ) > 0:
while len(UpperCamelCase__ ) < min_length:
SCREAMING_SNAKE_CASE : int = toks + toks
# toks_str = [t[1] for t in toks]
SCREAMING_SNAKE_CASE : Dict = [t[0] for t in toks]
# Ensure consistency
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.decode(UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
if " " not in output_txt and len(UpperCamelCase__ ) > 1:
SCREAMING_SNAKE_CASE : List[str] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase__ )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase__ )
)
if with_prefix_space:
SCREAMING_SNAKE_CASE : Tuple = ''' ''' + output_txt
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
return output_txt, output_ids
def __A ( self : Tuple , **UpperCamelCase__ : Dict ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
# check adding a single token
tokenizer.add_tokens('''xxx''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer('''m xxx ɪ''' , do_phonemize=UpperCamelCase__ ).input_ids
self.assertEqual(UpperCamelCase__ , [13, 392, 17] ) # xxx should be last token
tokenizer.add_tokens(['''aaa''', '''bbb''', '''ccc'''] )
SCREAMING_SNAKE_CASE : List[str] = tokenizer('''m aaa ɪ ccc''' , do_phonemize=UpperCamelCase__ ).input_ids
self.assertEqual(UpperCamelCase__ , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa
SCREAMING_SNAKE_CASE : Dict = tokenizer('''maɪ c''' , do_phonemize=UpperCamelCase__ ).input_ids
self.assertEqual(UpperCamelCase__ , [3, 200] ) # mai should be <unk> (=3)
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
SCREAMING_SNAKE_CASE : int = '''Hello how are you'''
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.phonemize(UpperCamelCase__ , phonemizer_lang='''en-us''' )
self.assertEqual(UpperCamelCase__ , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
SCREAMING_SNAKE_CASE : List[Any] = '''Hello how are you'''
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.phonemize(UpperCamelCase__ , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(UpperCamelCase__ ).input_ids , tokenizer(UpperCamelCase__ , do_phonemize=UpperCamelCase__ ).input_ids )
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
SCREAMING_SNAKE_CASE : Dict = '''Hello how are you'''
SCREAMING_SNAKE_CASE : int = tokenizer.phonemize(UpperCamelCase__ , phonemizer_lang='''en-us''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.decode(tokenizer(UpperCamelCase__ ).input_ids )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
SCREAMING_SNAKE_CASE : List[str] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
SCREAMING_SNAKE_CASE : Any = tokenizer.decode(sample_ids[0] )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.batch_decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , batch_tokens[0] )
self.assertEqual(UpperCamelCase__ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = '''Hello how are you'''
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.phonemize(UpperCamelCase__ , phonemizer_lang='''en-us''' )
self.assertEqual(UpperCamelCase__ , '''h ə l oʊ | h aʊ | ɑːɹ | j uː |''' )
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
SCREAMING_SNAKE_CASE : Dict = '''Hello how are you'''
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.phonemize(UpperCamelCase__ , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(UpperCamelCase__ ).input_ids , tokenizer(UpperCamelCase__ , do_phonemize=UpperCamelCase__ ).input_ids )
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
SCREAMING_SNAKE_CASE : str = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
SCREAMING_SNAKE_CASE : Any = tokenizer.decode(sample_ids[0] )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.batch_decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , batch_tokens[0] )
self.assertEqual(UpperCamelCase__ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
# decode with no word_del_token filter
SCREAMING_SNAKE_CASE : int = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.batch_decode(UpperCamelCase__ , filter_word_delimiter_token=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , batch_tokens[0] )
self.assertEqual(UpperCamelCase__ , ['''k s ɾ | ɾ l | ɭʲ''', '''| j ð | s j ð s oːɹ'''] )
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
SCREAMING_SNAKE_CASE : int = '''Hello how are you'''
SCREAMING_SNAKE_CASE : str = tokenizer.phonemize(UpperCamelCase__ , phonemizer_lang='''en-us''' )
SCREAMING_SNAKE_CASE : int = tokenizer.decode(tokenizer(UpperCamelCase__ ).input_ids , filter_word_delimiter_token=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
SCREAMING_SNAKE_CASE : Tuple = '''Hello how are you'''
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.phonemize(UpperCamelCase__ , phonemizer_lang='''en-us''' )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.decode(tokenizer(UpperCamelCase__ ).input_ids , filter_word_delimiter_token=UpperCamelCase__ )
self.assertEqual(''' '''.join([p.strip() for p in phonemes.split(''' |''' )] ).strip() , UpperCamelCase__ )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = '''Hello how are you'''
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer(UpperCamelCase__ , phonemizer_lang='''en-us''' ).input_ids
SCREAMING_SNAKE_CASE : Tuple = tokenizer(UpperCamelCase__ , phonemizer_lang='''fr-fr''' ).input_ids
self.assertNotEqual(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.decode(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = tokenizer.decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
self.assertEqual(UpperCamelCase__ , '''ɛ l o h aʊ a ʁ j u''' )
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
SCREAMING_SNAKE_CASE : Dict = '''Hello how Are you'''
SCREAMING_SNAKE_CASE : str = '''hello how are you'''
SCREAMING_SNAKE_CASE : Tuple = tokenizer(UpperCamelCase__ ).input_ids
SCREAMING_SNAKE_CASE : Dict = tokenizer(UpperCamelCase__ ).input_ids
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
tokenizer.add_tokens(['''!''', '''?'''] )
tokenizer.add_special_tokens({'''cls_token''': '''$$$'''} )
# fmt: off
SCREAMING_SNAKE_CASE : Dict = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
SCREAMING_SNAKE_CASE : str = tokenizer.batch_decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , ['''k s ɾ ɾ l ɭʲ!?!? $$$''', '''j ð s j ð s oːɹ $$$'''] )
@staticmethod
def __A ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [d[key] for d in offsets]
return retrieved_list
def __A ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizer(word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ
SCREAMING_SNAKE_CASE : Any = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
SCREAMING_SNAKE_CASE : Tuple = tokenizer.decode(UpperCamelCase__ , output_char_offsets=UpperCamelCase__ , filter_word_delimiter_token=UpperCamelCase__ )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''char_offsets''' in outputs )
self.assertTrue(isinstance(UpperCamelCase__ , UpperCamelCase__ ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) , ['''k''', '''s''', '''ɾ''', '''ɾ''', '''|''', '''ɾ''', '''l''', '''|''', '''ɭʲ'''] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''start_offset''' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''end_offset''' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer(word_delimiter_token='''|''' )
def check_list_tuples_equal(UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int ):
self.assertTrue(isinstance(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertTrue(isinstance(outputs_list[0] , UpperCamelCase__ ) )
# transform list to ModelOutput
SCREAMING_SNAKE_CASE : Any = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch['''text'''] , outputs_batch_a['''text'''] )
def recursive_check(UpperCamelCase__ : Any , UpperCamelCase__ : Dict ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
[recursive_check(UpperCamelCase__ , UpperCamelCase__ ) for la, la in zip(UpperCamelCase__ , UpperCamelCase__ )]
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch['''char_offsets'''] , outputs_batch_a['''char_offsets'''] )
# fmt: off
SCREAMING_SNAKE_CASE : str = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.batch_decode(UpperCamelCase__ , output_char_offsets=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = [tokenizer.decode(UpperCamelCase__ , output_char_offsets=UpperCamelCase__ ) for ids in sample_ids]
check_list_tuples_equal(UpperCamelCase__ , UpperCamelCase__ )
@unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''' )
def __A ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''' )
def __A ( self : Any ):
'''simple docstring'''
pass
@unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''' )
def __A ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''' )
def __A ( self : Any ):
'''simple docstring'''
pass
def __A ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.get_tokenizers(do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.vocab_size
SCREAMING_SNAKE_CASE : List[str] = len(UpperCamelCase__ )
self.assertNotEqual(UpperCamelCase__ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
SCREAMING_SNAKE_CASE : Dict = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
SCREAMING_SNAKE_CASE : List[str] = tokenizer.add_tokens(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.vocab_size
SCREAMING_SNAKE_CASE : Tuple = len(UpperCamelCase__ )
self.assertNotEqual(UpperCamelCase__ , 0 )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , len(UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , all_size + len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=UpperCamelCase__ )
self.assertGreaterEqual(len(UpperCamelCase__ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
SCREAMING_SNAKE_CASE : Union[str, Any] = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.add_special_tokens(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.vocab_size
SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ )
self.assertNotEqual(UpperCamelCase__ , 0 )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , len(UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , all_size_a + len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=UpperCamelCase__ )
self.assertGreaterEqual(len(UpperCamelCase__ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def __A ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def __A ( self : List[str] ):
'''simple docstring'''
pass
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizers(fast=UpperCamelCase__ , do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
SCREAMING_SNAKE_CASE : Tuple = ['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t''']
SCREAMING_SNAKE_CASE : Tuple = tokenizer.convert_tokens_to_string(UpperCamelCase__ )
self.assertIsInstance(output['''text'''] , UpperCamelCase__ )
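# Hedged sketch (added; stride and field names assumed): char offsets returned by
# decode(..., output_char_offsets=True) are usually converted into time spans with
# the acoustic model's frame stride, e.g. 320 samples at a 16 kHz sampling rate:
#
#     time_per_frame = 320 / 16_000           # 0.02 s per CTC frame (assumed)
#     spans = [
#         (o["char"], o["start_offset"] * time_per_frame, o["end_offset"] * time_per_frame)
#         for o in outputs.char_offsets
#     ]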
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
__UpperCamelCase : int = logging.get_logger(__name__)
def A ( _lowercase , _lowercase , _lowercase , _lowercase ):
def constraint_to_multiple_of(_lowercase , _lowercase , _lowercase=0 , _lowercase=None ):
SCREAMING_SNAKE_CASE : int = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
SCREAMING_SNAKE_CASE : Dict = math.floor(val / multiple ) * multiple
if x < min_val:
SCREAMING_SNAKE_CASE : Optional[Any] = math.ceil(val / multiple ) * multiple
return x
SCREAMING_SNAKE_CASE : Optional[Any] = (output_size, output_size) if isinstance(_lowercase , _lowercase ) else output_size
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = get_image_size(_lowercase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = output_size
# determine new height and width
SCREAMING_SNAKE_CASE : Dict = output_height / input_height
SCREAMING_SNAKE_CASE : Optional[Any] = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
SCREAMING_SNAKE_CASE : List[Any] = scale_width
else:
# fit height
SCREAMING_SNAKE_CASE : List[Any] = scale_height
SCREAMING_SNAKE_CASE : List[str] = constraint_to_multiple_of(scale_height * input_height , multiple=_lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = constraint_to_multiple_of(scale_width * input_width , multiple=_lowercase )
return (new_height, new_width)
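# Hedged worked example (added; values assumed): for a 480x640 input, a 384x384
# target, keep_aspect_ratio=True and multiple=32, the height scale 384/480 = 0.8
# deviates least from 1, so both sides are scaled by 0.8 and snapped to multiples
# of 32, giving (new_height, new_width) == (384, 512).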
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = ["""pixel_values"""]
def __init__( self : int , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase__ : bool = False , UpperCamelCase__ : int = 1 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = size if size is not None else {'''height''': 384, '''width''': 384}
SCREAMING_SNAKE_CASE : Any = get_size_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = do_resize
SCREAMING_SNAKE_CASE : Any = size
SCREAMING_SNAKE_CASE : str = keep_aspect_ratio
SCREAMING_SNAKE_CASE : List[str] = ensure_multiple_of
SCREAMING_SNAKE_CASE : int = resample
SCREAMING_SNAKE_CASE : Any = do_rescale
SCREAMING_SNAKE_CASE : List[Any] = rescale_factor
SCREAMING_SNAKE_CASE : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __A ( self : Optional[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : bool = False , UpperCamelCase__ : int = 1 , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = get_size_dict(UpperCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE : Any = get_resize_output_image_size(
UpperCamelCase__ , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=UpperCamelCase__ , multiple=UpperCamelCase__ , )
return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Dict , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ):
'''simple docstring'''
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : List[str] , ):
'''simple docstring'''
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Optional[Any] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : float = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE : Optional[Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
SCREAMING_SNAKE_CASE : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
SCREAMING_SNAKE_CASE : Tuple = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE : str = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE : List[Any] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE : List[Any] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE : Dict = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE : Tuple = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE : Dict = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE : Any = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE : Any = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images]
SCREAMING_SNAKE_CASE : Optional[int] = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
SCREAMING_SNAKE_CASE : Tuple = {'''pixel_values''': images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
def __A ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Tuple] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = target_sizes.numpy()
SCREAMING_SNAKE_CASE : Optional[int] = []
for idx in range(len(UpperCamelCase__ ) ):
SCREAMING_SNAKE_CASE : List[str] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : List[Any] = logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE : List[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
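# Hedged end-to-end usage sketch (added; checkpoint name assumed):
#
#     from transformers import DPTImageProcessor, DPTForSemanticSegmentation
#     processor = DPTImageProcessor.from_pretrained("Intel/dpt-large-ade")
#     model = DPTForSemanticSegmentation.from_pretrained("Intel/dpt-large-ade")
#     inputs = processor(images=image, return_tensors="pt")
#     outputs = model(**inputs)
#     seg = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]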
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class lowercase__ ( unittest.TestCase):
def __A ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
return f"""gaussian_noise_s={seed}_shape={'_'.join([str(UpperCamelCase__ ) for s in shape] )}.npy"""
def __A ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
def __A ( self : Dict , UpperCamelCase__ : List[Any]=0 , UpperCamelCase__ : Union[str, Any]=(4, 4, 64, 64) , UpperCamelCase__ : Optional[Any]=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = jnp.bfloataa if fpaa else jnp.floataa
SCREAMING_SNAKE_CASE : Any = jnp.array(load_hf_numpy(self.get_file_format(UpperCamelCase__ , UpperCamelCase__ ) ) , dtype=UpperCamelCase__ )
return image
def __A ( self : Optional[int] , UpperCamelCase__ : Dict=False , UpperCamelCase__ : List[Any]="CompVis/stable-diffusion-v1-4" ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = jnp.bfloataa if fpaa else jnp.floataa
SCREAMING_SNAKE_CASE : Any = '''bf16''' if fpaa else None
SCREAMING_SNAKE_CASE : List[Any] = FlaxUNetaDConditionModel.from_pretrained(
UpperCamelCase__ , subfolder='''unet''' , dtype=UpperCamelCase__ , revision=UpperCamelCase__ )
return model, params
def __A ( self : List[Any] , UpperCamelCase__ : List[str]=0 , UpperCamelCase__ : Dict=(4, 77, 768) , UpperCamelCase__ : Optional[int]=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = jnp.bfloataa if fpaa else jnp.floataa
SCREAMING_SNAKE_CASE : List[Any] = jnp.array(load_hf_numpy(self.get_file_format(UpperCamelCase__ , UpperCamelCase__ ) ) , dtype=UpperCamelCase__ )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def __A ( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = self.get_latents(UpperCamelCase__ , fpaa=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = self.get_encoder_hidden_states(UpperCamelCase__ , fpaa=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = model.apply(
{'''params''': params} , UpperCamelCase__ , jnp.array(UpperCamelCase__ , dtype=jnp.intaa ) , encoder_hidden_states=UpperCamelCase__ , ).sample
assert sample.shape == latents.shape
SCREAMING_SNAKE_CASE : List[Any] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE : str = jnp.array(UpperCamelCase__ , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def __A ( self : str , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = self.get_latents(UpperCamelCase__ , shape=(4, 4, 96, 96) , fpaa=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_encoder_hidden_states(UpperCamelCase__ , shape=(4, 77, 1024) , fpaa=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = model.apply(
{'''params''': params} , UpperCamelCase__ , jnp.array(UpperCamelCase__ , dtype=jnp.intaa ) , encoder_hidden_states=UpperCamelCase__ , ).sample
assert sample.shape == latents.shape
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE : Optional[int] = jnp.array(UpperCamelCase__ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-2 )
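# Hedged note (added): the reference slices in the @parameterized tables above are
# one-off dumps of the same flattened corner slice, captured on the hardware the
# 1e-2 tolerance was tuned for, e.g.:
#
#     print(jax.device_get(sample[-1, -2:, -2:, :2].flatten()))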
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def A ( _lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
SCREAMING_SNAKE_CASE : Any = True if '''large''' in model_name or '''huge''' in model_name else False
SCREAMING_SNAKE_CASE : Optional[int] = True if '''large''' in model_name or '''huge''' in model_name else False
SCREAMING_SNAKE_CASE : Optional[Any] = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
SCREAMING_SNAKE_CASE : List[str] = [3, 3, 3, 3]
SCREAMING_SNAKE_CASE : List[str] = [5, 5, 5, 5]
elif "fl4" in model_name:
SCREAMING_SNAKE_CASE : Any = [4, 4, 4, 4]
SCREAMING_SNAKE_CASE : str = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
SCREAMING_SNAKE_CASE : Optional[Any] = [3, 3, 3, 3]
if "lrf" in model_name:
SCREAMING_SNAKE_CASE : Tuple = [3, 3, 3, 3]
else:
SCREAMING_SNAKE_CASE : Any = [2, 2, 2, 2]
if "tiny" in model_name:
SCREAMING_SNAKE_CASE : Tuple = 96
elif "small" in model_name:
SCREAMING_SNAKE_CASE : List[Any] = 96
elif "base" in model_name:
SCREAMING_SNAKE_CASE : Tuple = 128
elif "large" in model_name:
SCREAMING_SNAKE_CASE : Optional[int] = 192
elif "xlarge" in model_name:
SCREAMING_SNAKE_CASE : Tuple = 256
elif "huge" in model_name:
SCREAMING_SNAKE_CASE : List[str] = 352
# set label information
SCREAMING_SNAKE_CASE : Dict = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
SCREAMING_SNAKE_CASE : Any = '''imagenet-22k-id2label.json'''
else:
SCREAMING_SNAKE_CASE : Dict = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE : Dict = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type='''dataset''' ) , '''r''' ) )
SCREAMING_SNAKE_CASE : Dict = {int(_lowercase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Dict = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : List[Any] = FocalNetConfig(
embed_dim=_lowercase , depths=_lowercase , focal_levels=_lowercase , focal_windows=_lowercase , use_conv_embed=_lowercase , idalabel=_lowercase , labelaid=_lowercase , use_post_layernorm=_lowercase , use_layerscale=_lowercase , )
return config
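# Hedged worked example (added): for model_name = "focalnet-tiny-lrf" the helper
# above resolves depths=[2, 2, 6, 2], focal_levels=[3, 3, 3, 3],
# focal_windows=[3, 3, 3, 3], embed_dim=96 and the ImageNet-1k label mapping.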
def A ( _lowercase ):
'''simple docstring'''
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
SCREAMING_SNAKE_CASE : List[str] = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
SCREAMING_SNAKE_CASE : int = '''encoder.''' + name
if "encoder.layers" in name:
SCREAMING_SNAKE_CASE : Optional[Any] = name.replace('''encoder.layers''' , '''encoder.stages''' )
if "downsample.proj" in name:
SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''downsample.proj''' , '''downsample.projection''' )
if "blocks" in name:
SCREAMING_SNAKE_CASE : int = name.replace('''blocks''' , '''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
SCREAMING_SNAKE_CASE : List[str] = name.replace('''modulation.f''' , '''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
SCREAMING_SNAKE_CASE : Optional[Any] = name.replace('''modulation.h''' , '''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
SCREAMING_SNAKE_CASE : str = name.replace('''modulation.proj''' , '''modulation.projection_out''' )
if name == "norm.weight":
SCREAMING_SNAKE_CASE : Union[str, Any] = '''layernorm.weight'''
if name == "norm.bias":
SCREAMING_SNAKE_CASE : Tuple = '''layernorm.bias'''
if "head" in name:
SCREAMING_SNAKE_CASE : str = name.replace('''head''' , '''classifier''' )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = '''focalnet.''' + name
return name
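# Hedged example (added) of one checkpoint key flowing through the renames above:
#     "layers.0.blocks.1.modulation.f.weight"
#     -> "focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight"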
def A ( _lowercase , _lowercase , _lowercase=False ):
'''simple docstring'''
    # fmt: off
    SCREAMING_SNAKE_CASE : Tuple = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
SCREAMING_SNAKE_CASE : Optional[Any] = model_name_to_url[model_name]
print('''Checkpoint URL: ''' , _lowercase )
SCREAMING_SNAKE_CASE : Dict = torch.hub.load_state_dict_from_url(_lowercase , map_location='''cpu''' )['''model''']
# rename keys
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE : List[str] = state_dict.pop(_lowercase )
SCREAMING_SNAKE_CASE : Any = val
SCREAMING_SNAKE_CASE : Any = get_focalnet_config(_lowercase )
SCREAMING_SNAKE_CASE : Dict = FocalNetForImageClassification(_lowercase )
model.eval()
# load state dict
model.load_state_dict(_lowercase )
# verify conversion
SCREAMING_SNAKE_CASE : str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE : str = BitImageProcessor(
do_resize=_lowercase , size={'''shortest_edge''': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=_lowercase , crop_size=224 , do_normalize=_lowercase , image_mean=_lowercase , image_std=_lowercase , )
SCREAMING_SNAKE_CASE : str = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
SCREAMING_SNAKE_CASE : str = processor(images=_lowercase , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : List[Any] = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
SCREAMING_SNAKE_CASE : Optional[int] = image_transforms(_lowercase ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , _lowercase , atol=1e-4 )
SCREAMING_SNAKE_CASE : Dict = model(**_lowercase )
SCREAMING_SNAKE_CASE : Tuple = outputs.logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
print('''First values of logits:''' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([0.2166, -0.4368, 0.2191] )
elif model_name == "focalnet-tiny-lrf":
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([1.1669, 0.0125, -0.1695] )
elif model_name == "focalnet-small":
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.4917, -0.0430, 0.1341] )
elif model_name == "focalnet-small-lrf":
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([-0.2588, -0.5342, -0.2331] )
elif model_name == "focalnet-base":
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([-0.1655, -0.4090, -0.1730] )
elif model_name == "focalnet-base-lrf":
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([0.5306, -0.0483, -0.3928] )
assert torch.allclose(outputs.logits[0, :3] , _lowercase , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
processor.save_pretrained(_lowercase )
if push_to_hub:
print(f"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(f"""{model_name}""" )
processor.push_to_hub(f"""{model_name}""" )
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
__UpperCamelCase : Dict = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
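# Hedged CLI sketch (added; the script filename is assumed):
#
#     python convert_focalnet_to_hf_format.py \
#         --model_name focalnet-tiny \
#         --pytorch_dump_folder_path ./focalnet-tiny \
#         --push_to_hub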
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Tuple = logging.get_logger(__name__)
# TODO Update this
__UpperCamelCase : List[str] = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """esm"""
def __init__( self : Tuple , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : int=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Any=768 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : str=12 , UpperCamelCase__ : Optional[int]=3072 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Union[str, Any]=1026 , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : Any=1E-12 , UpperCamelCase__ : Dict="absolute" , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : int=None , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Optional[Any]=None , **UpperCamelCase__ : Any , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__ , mask_token_id=UpperCamelCase__ , **UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : Any = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Dict = num_attention_heads
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps
SCREAMING_SNAKE_CASE : Dict = position_embedding_type
SCREAMING_SNAKE_CASE : Any = use_cache
SCREAMING_SNAKE_CASE : Dict = emb_layer_norm_before
SCREAMING_SNAKE_CASE : List[str] = token_dropout
SCREAMING_SNAKE_CASE : List[Any] = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('''No esmfold_config supplied for folding model, using default values.''' )
SCREAMING_SNAKE_CASE : List[Any] = EsmFoldConfig()
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = EsmFoldConfig(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = esmfold_config
if vocab_list is None:
logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
SCREAMING_SNAKE_CASE : Optional[int] = get_default_vocab_list()
else:
SCREAMING_SNAKE_CASE : Optional[Any] = vocab_list
else:
SCREAMING_SNAKE_CASE : str = None
SCREAMING_SNAKE_CASE : int = None
if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCamelCase__ ):
raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = super().to_dict()
if isinstance(self.esmfold_config , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Optional[Any] = self.esmfold_config.to_dict()
return output
@dataclass
class lowercase__ :
UpperCamelCase_ = None
UpperCamelCase_ = True
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = 0
UpperCamelCase_ = True
UpperCamelCase_ = False
UpperCamelCase_ = 128
UpperCamelCase_ = None
def __A ( self : Optional[int] ):
'''simple docstring'''
if self.trunk is None:
SCREAMING_SNAKE_CASE : Optional[Any] = TrunkConfig()
elif isinstance(self.trunk , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Tuple = TrunkConfig(**self.trunk )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = asdict(self )
SCREAMING_SNAKE_CASE : Tuple = self.trunk.to_dict()
return output
@dataclass
class lowercase__ :
UpperCamelCase_ = 48
UpperCamelCase_ = 1_024
UpperCamelCase_ = 128
UpperCamelCase_ = 32
UpperCamelCase_ = 32
UpperCamelCase_ = 32
UpperCamelCase_ = 0
UpperCamelCase_ = 0
UpperCamelCase_ = False
UpperCamelCase_ = 4
UpperCamelCase_ = 128
UpperCamelCase_ = None
def __A ( self : Any ):
'''simple docstring'''
if self.structure_module is None:
SCREAMING_SNAKE_CASE : Optional[int] = StructureModuleConfig()
elif isinstance(self.structure_module , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Optional[Any] = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""" )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
                f""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
                f""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )
SCREAMING_SNAKE_CASE : Dict = self.sequence_state_dim // self.sequence_head_width
SCREAMING_SNAKE_CASE : Tuple = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = asdict(self )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.structure_module.to_dict()
return output
@dataclass
class lowercase__ :
UpperCamelCase_ = 384
UpperCamelCase_ = 128
UpperCamelCase_ = 16
UpperCamelCase_ = 128
UpperCamelCase_ = 12
UpperCamelCase_ = 4
UpperCamelCase_ = 8
UpperCamelCase_ = 0.1
UpperCamelCase_ = 8
UpperCamelCase_ = 1
UpperCamelCase_ = 2
UpperCamelCase_ = 7
UpperCamelCase_ = 10
UpperCamelCase_ = 1E-8
UpperCamelCase_ = 1E5
def __A ( self : Dict ):
'''simple docstring'''
return asdict(self )
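# Default ESM-2 vocabulary: special tokens followed by the amino-acid alphabet and gap/mask symbols.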
def A ( ):
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 34
| 0
|
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
__UpperCamelCase : Dict = logging.get_logger(__name__)
__UpperCamelCase : Union[str, Any] = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
'constant': get_constant_schedule,
'constant_w_warmup': get_constant_schedule_with_warmup,
}
class lowercase__ ( UpperCamelCase_):
def __init__( self : Union[str, Any] , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Optional[int]=None , *UpperCamelCase__ : Dict , **UpperCamelCase__ : str ):
'''simple docstring'''
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
if config is None:
assert isinstance(self.model , UpperCamelCase__ ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f""" {self.model.__class__}"""
)
SCREAMING_SNAKE_CASE : List[Any] = self.model.config
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = config
SCREAMING_SNAKE_CASE : List[Any] = data_args
SCREAMING_SNAKE_CASE : str = self.config.tgt_vocab_size if isinstance(self.config , UpperCamelCase__ ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
                ''' padding.''' )
if self.args.label_smoothing == 0:
SCREAMING_SNAKE_CASE : int = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
SCREAMING_SNAKE_CASE : List[str] = label_smoothed_nll_loss
def __A ( self : Any , UpperCamelCase__ : int ):
'''simple docstring'''
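        # Build the optimizer (Adafactor or AdamW), keeping biases and LayerNorm weights out of weight decay,
        # then attach the learning-rate scheduler.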
if self.optimizer is None:
SCREAMING_SNAKE_CASE : str = ['''bias''', '''LayerNorm.weight''']
SCREAMING_SNAKE_CASE : Optional[int] = [
{
'''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
'''weight_decay''': self.args.weight_decay,
},
{
'''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
'''weight_decay''': 0.0,
},
]
if self.args.adafactor:
SCREAMING_SNAKE_CASE : str = Adafactor
SCREAMING_SNAKE_CASE : Optional[Any] = {'''scale_parameter''': False, '''relative_step''': False}
else:
SCREAMING_SNAKE_CASE : Optional[int] = AdamW
SCREAMING_SNAKE_CASE : Any = {
'''betas''': (self.args.adam_betaa, self.args.adam_betaa),
'''eps''': self.args.adam_epsilon,
}
SCREAMING_SNAKE_CASE : Dict = self.args.learning_rate
if self.sharded_ddp:
SCREAMING_SNAKE_CASE : Any = OSS(
params=UpperCamelCase__ , optim=UpperCamelCase__ , **UpperCamelCase__ , )
else:
SCREAMING_SNAKE_CASE : Any = optimizer_cls(UpperCamelCase__ , **UpperCamelCase__ )
if self.lr_scheduler is None:
SCREAMING_SNAKE_CASE : List[Any] = self._get_lr_scheduler(UpperCamelCase__ )
else: # ignoring --lr_scheduler
logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''' )
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
SCREAMING_SNAKE_CASE : List[str] = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
SCREAMING_SNAKE_CASE : Optional[Any] = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
SCREAMING_SNAKE_CASE : str = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=UpperCamelCase__ )
return scheduler
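    # Pick the training sampler: none for iterable datasets, a TPU sampler on TPU, otherwise a random
    # (or distributed) sampler, after an optional sortish re-ordering of the dataset.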
def __A ( self : Any ):
'''simple docstring'''
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
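    # Loss computation: plain cross-entropy that ignores pad tokens, the model's own loss, or label-smoothed NLL.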
def __A ( self : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] ):
'''simple docstring'''
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
SCREAMING_SNAKE_CASE : List[str] = model(**UpperCamelCase__ , use_cache=UpperCamelCase__ )[0]
SCREAMING_SNAKE_CASE : List[str] = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
                # compute usual loss via the model
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**UpperCamelCase__ , labels=UpperCamelCase__ , use_cache=UpperCamelCase__ )[:2]
else:
# compute label smoothed loss
SCREAMING_SNAKE_CASE : List[str] = model(**UpperCamelCase__ , use_cache=UpperCamelCase__ )[0]
SCREAMING_SNAKE_CASE : int = torch.nn.functional.log_softmax(UpperCamelCase__ , dim=-1 )
SCREAMING_SNAKE_CASE : int = self.loss_fn(UpperCamelCase__ , UpperCamelCase__ , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def __A ( self : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = inputs.pop('''labels''' )
SCREAMING_SNAKE_CASE : List[Any] = self._compute_loss(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return loss
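    # Evaluation step: optionally generate tokens (padded up to max_length) and compute the loss on the labels.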
def __A ( self : str , UpperCamelCase__ : nn.Module , UpperCamelCase__ : Dict[str, Union[torch.Tensor, Any]] , UpperCamelCase__ : bool , UpperCamelCase__ : Optional[List[str]] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self._prepare_inputs(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = {
'''max_length''': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
SCREAMING_SNAKE_CASE : Optional[Any] = self.model.generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , **UpperCamelCase__ , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
SCREAMING_SNAKE_CASE : Optional[Any] = self._pad_tensors_to_max_len(UpperCamelCase__ , gen_kwargs['''max_length'''] )
SCREAMING_SNAKE_CASE : Tuple = inputs.pop('''labels''' )
with torch.no_grad():
# compute loss on predict data
SCREAMING_SNAKE_CASE : int = self._compute_loss(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
SCREAMING_SNAKE_CASE : int = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
SCREAMING_SNAKE_CASE : Dict = self._pad_tensors_to_max_len(UpperCamelCase__ , gen_kwargs['''max_length'''] )
return (loss, logits, labels)
def __A ( self : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'''
f""" padded to `max_length`={max_length}""" )
SCREAMING_SNAKE_CASE : List[Any] = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
SCREAMING_SNAKE_CASE : Tuple = tensor
return padded_tensor
| 713
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""")) is not True , reason="""Skipping test because it should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""")
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
])
class lowercase__ ( unittest.TestCase):
def __A ( self : Any ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=UpperCamelCase__ , )
assert hasattr(self , '''env''' )
def __A ( self : str , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = f"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"""
# distributed data settings
SCREAMING_SNAKE_CASE : Any = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=UpperCamelCase__ , instance_count=UpperCamelCase__ , instance_type=self.instance_type , debugger_hook_config=UpperCamelCase__ , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=UpperCamelCase__ , py_version='''py36''' , )
def __A ( self : Optional[Any] , UpperCamelCase__ : List[str] ):
'''simple docstring'''
TrainingJobAnalytics(UpperCamelCase__ ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def __A ( self : Tuple , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.create_estimator(UpperCamelCase__ )
# run training
estimator.fit()
# result dataframe
SCREAMING_SNAKE_CASE : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
SCREAMING_SNAKE_CASE : List[str] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
SCREAMING_SNAKE_CASE : Dict = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
SCREAMING_SNAKE_CASE : List[Any] = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , UpperCamelCase__ )
| 34
| 0
|
def A ( _lowercase = 100 ):
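    # Count the distinct terms produced by a**b for 2 <= a, b <= n (Project Euler problem 29 when n = 100).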
SCREAMING_SNAKE_CASE : Optional[int] = set()
SCREAMING_SNAKE_CASE : List[Any] = 0
SCREAMING_SNAKE_CASE : str = n + 1 # maximum limit
for a in range(2 , _lowercase ):
for b in range(2 , _lowercase ):
SCREAMING_SNAKE_CASE : Tuple = a**b # calculates the current power
collect_powers.add(_lowercase ) # adds the result to the set
return len(_lowercase )
if __name__ == "__main__":
    print('Number of terms ', solution(int(input().strip())))
| 714
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__UpperCamelCase : Dict = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__UpperCamelCase : Tuple = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f"""{len(upper_files)} files contain uppercase characters:""")
print('\n'.join(upper_files) + '\n')
__UpperCamelCase : List[Any] = [file for file in filepaths if ' ' in file]
if space_files:
print(f"""{len(space_files)} files contain space characters:""")
print('\n'.join(space_files) + '\n')
__UpperCamelCase : List[Any] = [file for file in filepaths if '-' in file]
if hyphen_files:
print(f"""{len(hyphen_files)} files contain hyphen characters:""")
print('\n'.join(hyphen_files) + '\n')
__UpperCamelCase : List[Any] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f"""{len(nodir_files)} files are not in a directory:""")
print('\n'.join(nodir_files) + '\n')
__UpperCamelCase : Optional[Any] = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 34
| 0
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=UpperCamelCase_)
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = field(default="""image-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True})
UpperCamelCase_ = Features({"""image""": Image()})
UpperCamelCase_ = Features({"""labels""": ClassLabel})
UpperCamelCase_ = """image"""
UpperCamelCase_ = """labels"""
def __A ( self : str , UpperCamelCase__ : List[str] ):
'''simple docstring'''
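        # Check the label column exists and holds a ClassLabel, then copy that feature into the task's label schema.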
if self.label_column not in features:
raise ValueError(f"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , UpperCamelCase__ ):
raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" )
SCREAMING_SNAKE_CASE : Optional[int] = copy.deepcopy(self )
SCREAMING_SNAKE_CASE : Any = self.label_schema.copy()
SCREAMING_SNAKE_CASE : Optional[Any] = features[self.label_column]
SCREAMING_SNAKE_CASE : Union[str, Any] = label_schema
return task_template
@property
def __A ( self : Optional[int] ):
'''simple docstring'''
return {
self.image_column: "image",
self.label_column: "labels",
}
| 715
|
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
__UpperCamelCase : Dict = None
__UpperCamelCase : Tuple = logging.get_logger(__name__)
__UpperCamelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__UpperCamelCase : Optional[int] = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
__UpperCamelCase : Union[str, Any] = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ["""input_ids""", """attention_mask"""]
UpperCamelCase_ = TaTokenizer
UpperCamelCase_ = []
def __init__( self : str , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : str="<unk>" , UpperCamelCase__ : Optional[int]="<pad>" , UpperCamelCase__ : Optional[Any]=100 , UpperCamelCase__ : List[Any]=None , **UpperCamelCase__ : str , ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
SCREAMING_SNAKE_CASE : List[str] = [f"""<extra_id_{i}>""" for i in range(UpperCamelCase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
SCREAMING_SNAKE_CASE : int = len(set(filter(lambda UpperCamelCase__ : bool('''extra_id_''' in str(UpperCamelCase__ ) ) , UpperCamelCase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , extra_ids=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : str = vocab_file
SCREAMING_SNAKE_CASE : int = False if not self.vocab_file else True
SCREAMING_SNAKE_CASE : str = extra_ids
@staticmethod
def __A ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
SCREAMING_SNAKE_CASE : List[str] = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f""" {pretrained_model_name_or_path} automatically truncating your input to"""
f""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
f""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , UpperCamelCase__ , )
return max_model_length
def __A ( self : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE : Any = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ):
copyfile(self.vocab_file , UpperCamelCase__ )
logger.info(f"""Copy vocab file to {out_vocab_file}""" )
return (out_vocab_file,)
def __A ( self : Optional[Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
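        # A single sequence becomes `prefix + A + </s>`; a pair becomes `prefix + A + </s> + B + </s>`.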
        SCREAMING_SNAKE_CASE : Optional[int] = token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a
        else:
            SCREAMING_SNAKE_CASE : Tuple = token_ids_b + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_b
def __A ( self : Any , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.eos_token_id]
        if token_ids_b is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_b + eos ) * [0]
def __A ( self : Dict ):
'''simple docstring'''
return list(
set(filter(lambda UpperCamelCase__ : bool(re.search(r'''<extra_id_\d+>''' , UpperCamelCase__ ) ) is not None , self.additional_special_tokens ) ) )
def __A ( self : List[Any] ):
'''simple docstring'''
return [self.convert_tokens_to_ids(UpperCamelCase__ ) for token in self.get_sentinel_tokens()]
| 34
| 0
|
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = CTRLTokenizer
UpperCamelCase_ = False
UpperCamelCase_ = False
def __A ( self : Any ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE : Optional[Any] = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
SCREAMING_SNAKE_CASE : str = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
SCREAMING_SNAKE_CASE : Any = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
SCREAMING_SNAKE_CASE : Dict = {'''unk_token''': '''<unk>'''}
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCamelCase__ ) )
def __A ( self : List[str] , **UpperCamelCase__ : Dict ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = '''adapt react readapt apt'''
SCREAMING_SNAKE_CASE : Union[str, Any] = '''adapt react readapt apt'''
return input_text, output_text
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE : Any = '''adapt react readapt apt'''
SCREAMING_SNAKE_CASE : Optional[int] = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE : Union[str, Any] = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , UpperCamelCase__ )
| 716
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__UpperCamelCase : str = False
class lowercase__ ( unittest.TestCase):
pass
@nightly
@require_torch_gpu
class lowercase__ ( unittest.TestCase):
def __A ( self : Union[str, Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : Union[str, Any] ):
'''simple docstring'''
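        # Round-trip the pipeline through save_pretrained/from_pretrained and check both copies produce the same images.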
SCREAMING_SNAKE_CASE : int = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = pipe.dual_guided(
prompt='''first prompt''' , image=UpperCamelCase__ , text_to_image_strength=0.75 , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = VersatileDiffusionPipeline.from_pretrained(UpperCamelCase__ , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = generator.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = pipe.dual_guided(
prompt='''first prompt''' , image=UpperCamelCase__ , text_to_image_strength=0.75 , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = '''cyberpunk 2077'''
SCREAMING_SNAKE_CASE : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = pipe.dual_guided(
prompt=UpperCamelCase__ , image=UpperCamelCase__ , text_to_image_strength=0.75 , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
SCREAMING_SNAKE_CASE : Tuple = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
SCREAMING_SNAKE_CASE : Optional[Any] = '''A painting of a squirrel eating a burger '''
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = pipe.text_to_image(
prompt=UpperCamelCase__ , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE : List[str] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Any = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
SCREAMING_SNAKE_CASE : Optional[Any] = pipe.image_variation(UpperCamelCase__ , generator=UpperCamelCase__ , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE : Optional[int] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 34
| 0
|
'''simple docstring'''
def A ( _lowercase ):
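    # A perfect number equals the sum of its proper divisors, e.g. 6 = 1 + 2 + 3.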
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
__UpperCamelCase : Dict = int(input('Enter number: ').strip())
print(f"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
| 717
|
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def A ( _lowercase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = analyze_text(_lowercase )
SCREAMING_SNAKE_CASE : Any = list(''' ''' + ascii_lowercase )
    # total character count, used to turn frequencies into probabilities.
SCREAMING_SNAKE_CASE : Tuple = sum(single_char_strings.values() )
# one length string
SCREAMING_SNAKE_CASE : Tuple = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
SCREAMING_SNAKE_CASE : Tuple = single_char_strings[ch]
SCREAMING_SNAKE_CASE : List[str] = my_str / all_sum
my_fir_sum += prob * math.loga(_lowercase ) # entropy formula.
# print entropy
print(f"""{round(-1 * my_fir_sum ):.1f}""" )
# two len string
SCREAMING_SNAKE_CASE : Optional[Any] = sum(two_char_strings.values() )
SCREAMING_SNAKE_CASE : List[str] = 0
# for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            SCREAMING_SNAKE_CASE : Union[str, Any] = cha + chb
if sequence in two_char_strings:
SCREAMING_SNAKE_CASE : Any = two_char_strings[sequence]
SCREAMING_SNAKE_CASE : Dict = int(_lowercase ) / all_sum
my_sec_sum += prob * math.loga(_lowercase )
# print second entropy
print(f"""{round(-1 * my_sec_sum ):.1f}""" )
# print the difference between them
print(f"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )
def A ( _lowercase ):
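    # Count single characters and adjacent two-character sequences in the text.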
SCREAMING_SNAKE_CASE : Tuple = Counter() # type: ignore
SCREAMING_SNAKE_CASE : Any = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(_lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def A ( ):
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 34
| 0
|
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
__UpperCamelCase : Tuple = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class lowercase__ ( nn.Module):
def __init__( self : Tuple , UpperCamelCase__ : Dict ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Dict = torchvision.models.resnetaaa(pretrained=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = list(model.children() )[:-2]
SCREAMING_SNAKE_CASE : List[Any] = nn.Sequential(*UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] )
def __A ( self : str , UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.pool(self.model(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Dict = torch.flatten(UpperCamelCase__ , start_dim=2 )
SCREAMING_SNAKE_CASE : str = out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
class lowercase__ ( UpperCamelCase_):
def __init__( self : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = [json.loads(UpperCamelCase__ ) for l in open(UpperCamelCase__ )]
SCREAMING_SNAKE_CASE : Dict = os.path.dirname(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = tokenizer
SCREAMING_SNAKE_CASE : Tuple = labels
SCREAMING_SNAKE_CASE : Tuple = len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = max_seq_length
SCREAMING_SNAKE_CASE : int = transforms
def __len__( self : Optional[int] ):
'''simple docstring'''
return len(self.data )
def __getitem__( self : str , UpperCamelCase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = torch.LongTensor(self.tokenizer.encode(self.data[index]['''text'''] , add_special_tokens=UpperCamelCase__ ) )
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = sentence[0], sentence[1:-1], sentence[-1]
SCREAMING_SNAKE_CASE : List[str] = sentence[: self.max_seq_length]
SCREAMING_SNAKE_CASE : Optional[Any] = torch.zeros(self.n_classes )
        # multi-label target: set a 1 at the index of each label attached to this example
        label[[self.labels.index(tgt ) for tgt in self.data[index]['''label''']]] = 1
SCREAMING_SNAKE_CASE : Dict = Image.open(os.path.join(self.data_dir , self.data[index]['''img'''] ) ).convert('''RGB''' )
SCREAMING_SNAKE_CASE : Any = self.transforms(UpperCamelCase__ )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def __A ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = Counter()
for row in self.data:
label_freqs.update(row['''label'''] )
return label_freqs
def A ( _lowercase ):
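    # Pad the variable-length sentences into one batch tensor and build the matching attention mask.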
SCREAMING_SNAKE_CASE : Optional[Any] = [len(row['''sentence'''] ) for row in batch]
    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = len(_lowercase ), max(lens )
    SCREAMING_SNAKE_CASE : List[str] = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    SCREAMING_SNAKE_CASE : int = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    for i_batch, (input_row, length) in enumerate(zip(_lowercase , lens ) ):
        text_tensor[i_batch, :length] = input_row['''sentence''']
        mask_tensor[i_batch, :length] = 1
SCREAMING_SNAKE_CASE : Dict = torch.stack([row['''image'''] for row in batch] )
SCREAMING_SNAKE_CASE : int = torch.stack([row['''label'''] for row in batch] )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.stack([row['''image_start_token'''] for row in batch] )
SCREAMING_SNAKE_CASE : Dict = torch.stack([row['''image_end_token'''] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def A ( ):
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def A ( ):
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.4677_7044, 0.4453_1429, 0.4066_1017] , std=[0.1222_1994, 0.1214_5835, 0.1438_0469] , ),
] )
| 718
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__UpperCamelCase : Tuple = {
'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'],
'tokenization_ctrl': ['CTRLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = [
'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'CTRLForSequenceClassification',
'CTRLLMHeadModel',
'CTRLModel',
'CTRLPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCTRLForSequenceClassification',
'TFCTRLLMHeadModel',
'TFCTRLModel',
'TFCTRLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 34
| 0
|
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class lowercase__ ( unittest.TestCase):
def __A ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = '''ylacombe/bark-small'''
SCREAMING_SNAKE_CASE : Optional[int] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : Dict = '''en_speaker_1'''
SCREAMING_SNAKE_CASE : str = '''This is a test string'''
SCREAMING_SNAKE_CASE : int = '''speaker_embeddings_path.json'''
SCREAMING_SNAKE_CASE : List[Any] = '''speaker_embeddings'''
def __A ( self : str , **UpperCamelCase__ : int ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.checkpoint , **UpperCamelCase__ )
def __A ( self : Optional[int] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __A ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : int = BarkProcessor(tokenizer=UpperCamelCase__ )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : Union[str, Any] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
SCREAMING_SNAKE_CASE : Tuple = 35
SCREAMING_SNAKE_CASE : Optional[int] = 2
SCREAMING_SNAKE_CASE : int = 8
SCREAMING_SNAKE_CASE : List[str] = {
'''semantic_prompt''': np.ones(UpperCamelCase__ ),
'''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ),
'''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
SCREAMING_SNAKE_CASE : Tuple = processor(text=self.input_string , voice_preset=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCamelCase__ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(self.tmpdirname , '''file.npz''' )
np.savez(UpperCamelCase__ , **UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = processor(text=self.input_string , voice_preset=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCamelCase__ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
SCREAMING_SNAKE_CASE : List[Any] = processor(text=self.input_string , voice_preset=self.voice_preset )
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.get_tokenizer()
SCREAMING_SNAKE_CASE : str = BarkProcessor(tokenizer=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = processor(text=self.input_string )
SCREAMING_SNAKE_CASE : Any = tokenizer(
self.input_string , padding='''max_length''' , max_length=256 , add_special_tokens=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 719
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCamelCase : Tuple = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = ['MaskFormerFeatureExtractor']
__UpperCamelCase : List[Any] = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
__UpperCamelCase : Union[str, Any] = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 34
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class lowercase__ ( unittest.TestCase):
def __init__( self : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : str=7 , UpperCamelCase__ : Any=3 , UpperCamelCase__ : str=18 , UpperCamelCase__ : Dict=30 , UpperCamelCase__ : Any=400 , UpperCamelCase__ : str=True , UpperCamelCase__ : str=32 , UpperCamelCase__ : str=True , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = parent
SCREAMING_SNAKE_CASE : Any = batch_size
SCREAMING_SNAKE_CASE : Optional[int] = num_channels
SCREAMING_SNAKE_CASE : Optional[Any] = image_size
SCREAMING_SNAKE_CASE : Any = min_resolution
SCREAMING_SNAKE_CASE : Union[str, Any] = max_resolution
SCREAMING_SNAKE_CASE : int = do_resize
SCREAMING_SNAKE_CASE : Dict = size_divisor
SCREAMING_SNAKE_CASE : Dict = do_rescale
def __A ( self : Tuple ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = GLPNImageProcessor if is_vision_available() else None
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = GLPNImageProcessingTester(self )
@property
def __A ( self : Optional[int] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__ , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''size_divisor''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''resample''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''do_rescale''' ) )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
pass
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
SCREAMING_SNAKE_CASE : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
SCREAMING_SNAKE_CASE : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
SCREAMING_SNAKE_CASE : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
| 720
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
__UpperCamelCase : Dict = [
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def A ( _lowercase , _lowercase=None , _lowercase=None , _lowercase=None ):
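    # Re-prompt until the answer converts cleanly; an empty answer falls back to the default when one is given.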
SCREAMING_SNAKE_CASE : Union[str, Any] = True
while ask_again:
SCREAMING_SNAKE_CASE : Optional[Any] = input(_lowercase )
try:
if default is not None and len(_lowercase ) == 0:
return default
return convert_value(_lowercase ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(_lowercase )
def A ( _lowercase , _lowercase=[] , _lowercase=None , _lowercase=0 ):
SCREAMING_SNAKE_CASE : Dict = BulletMenu(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE : str = menu.run(default_choice=_lowercase )
return convert_value(_lowercase ) if convert_value is not None else result
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Dict = int(_lowercase )
return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Any = int(_lowercase )
return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = int(_lowercase )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = int(_lowercase )
return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Dict = int(_lowercase )
return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] )
def A ( _lowercase ):
return {"yes": True, "no": False}[value.lower()]
class lowercase__ ( argparse.RawDescriptionHelpFormatter):
def __A ( self : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = super()._format_usage(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = usage.replace('''<command> [<args>] ''' , '''''' )
return usage
| 34
| 0
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""")) is not True , reason="""Skipping test because it should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""")
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
])
class lowercase__ ( unittest.TestCase):
def __A ( self : Any ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=UpperCamelCase__ , )
assert hasattr(self , '''env''' )
def __A ( self : str , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = f"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"""
# distributed data settings
SCREAMING_SNAKE_CASE : Any = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=UpperCamelCase__ , instance_count=UpperCamelCase__ , instance_type=self.instance_type , debugger_hook_config=UpperCamelCase__ , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=UpperCamelCase__ , py_version='''py36''' , )
def __A ( self : Optional[Any] , UpperCamelCase__ : List[str] ):
'''simple docstring'''
TrainingJobAnalytics(UpperCamelCase__ ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def __A ( self : Tuple , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.create_estimator(UpperCamelCase__ )
# run training
estimator.fit()
# result dataframe
SCREAMING_SNAKE_CASE : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
SCREAMING_SNAKE_CASE : List[str] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
SCREAMING_SNAKE_CASE : Dict = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
SCREAMING_SNAKE_CASE : List[Any] = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , UpperCamelCase__ )
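# --- Illustrative sketch (not part of the original test) ---
# The KPI extraction above filters the SageMaker metrics dataframe by
# `metric_name`. The same pandas pattern, self-contained with made-up data:
def _kpi_extraction_demo():
    import pandas as pd

    df = pd.DataFrame(
        {"metric_name": ["eval_accuracy", "eval_loss", "eval_accuracy"], "value": [0.71, 0.58, 0.73]}
    )
    eval_accuracy = list(df[df.metric_name == "eval_accuracy"]["value"])  # [0.71, 0.73]
    assert all(acc >= 0.7 for acc in eval_accuracy)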
| 721
|
from __future__ import annotations
from typing import Any
class lowercase__ ( UpperCamelCase_):
pass
class lowercase__ :
def __init__( self : Union[str, Any] , UpperCamelCase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = data
SCREAMING_SNAKE_CASE : Node | None = None
def __iter__( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self
SCREAMING_SNAKE_CASE : Tuple = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(UpperCamelCase__ )
yield node.data
SCREAMING_SNAKE_CASE : Dict = node.next_node
@property
def __A ( self : Optional[int] ):
'''simple docstring'''
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
__UpperCamelCase : List[Any] = Node(1)
__UpperCamelCase : str = Node(2)
__UpperCamelCase : Dict = Node(3)
__UpperCamelCase : List[Any] = Node(4)
print(root_node.has_loop) # False
__UpperCamelCase : int = root_node.next_node
print(root_node.has_loop) # True
__UpperCamelCase : Union[str, Any] = Node(5)
__UpperCamelCase : Union[str, Any] = Node(6)
__UpperCamelCase : List[Any] = Node(5)
__UpperCamelCase : List[str] = Node(6)
print(root_node.has_loop) # False
__UpperCamelCase : List[Any] = Node(1)
print(root_node.has_loop) # False
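    # --- Illustrative sketch (not part of the original module) ---
    # `has_loop` above tracks visited nodes in a list, costing O(n) extra
    # memory. Floyd's tortoise-and-hare algorithm detects a cycle in O(1)
    # extra memory; `floyd_has_loop` is a hypothetical alternative written
    # against the same `next_node` interface.
    def floyd_has_loop(head) -> bool:
        slow = fast = head
        while fast is not None and fast.next_node is not None:
            slow = slow.next_node  # advances one step
            fast = fast.next_node.next_node  # advances two steps
            if slow is fast:  # the pointers can only meet inside a cycle
                return True
        return False

    head = Node(1)
    tail = Node(2)
    head.next_node = tail
    tail.next_node = None
    print(floyd_has_loop(head))  # False
    tail.next_node = head  # close the chain into a cycle
    print(floyd_has_loop(head))  # True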
| 34
| 0
|
__UpperCamelCase : List[str] = 9.8_0665
def A ( fluid_density , volume , gravity = g ):
    if fluid_density <= 0:
        raise ValueError('''Impossible fluid density''' )
    if volume < 0:
        raise ValueError('''Impossible object volume''' )
    if gravity <= 0:
        raise ValueError('''Impossible gravity''' )
    return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
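    # Illustrative worked example (assumed values): 2 litres fully submerged
    # in fresh water (~1000 kg/m^3) gives F = 1000 * 9.80665 * 0.002 ~ 19.61 N.
    print(A(1000, 0.002))  # ~19.6133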
| 700
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = ["""input_features""", """is_longer"""]
def __init__( self : Optional[Any] , UpperCamelCase__ : Dict=64 , UpperCamelCase__ : Optional[Any]=4_8000 , UpperCamelCase__ : Tuple=480 , UpperCamelCase__ : Union[str, Any]=10 , UpperCamelCase__ : List[Any]=1024 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : int=False , UpperCamelCase__ : float = 0 , UpperCamelCase__ : float = 1_4000 , UpperCamelCase__ : int = None , UpperCamelCase__ : str = "fusion" , UpperCamelCase__ : str = "repeatpad" , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
feature_size=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , padding_value=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = top_db
SCREAMING_SNAKE_CASE : Union[str, Any] = truncation
SCREAMING_SNAKE_CASE : str = padding
SCREAMING_SNAKE_CASE : List[Any] = fft_window_size
SCREAMING_SNAKE_CASE : Tuple = (fft_window_size >> 1) + 1
SCREAMING_SNAKE_CASE : List[str] = hop_length
SCREAMING_SNAKE_CASE : List[Any] = max_length_s
SCREAMING_SNAKE_CASE : Tuple = max_length_s * sampling_rate
SCREAMING_SNAKE_CASE : List[Any] = sampling_rate
SCREAMING_SNAKE_CASE : List[str] = frequency_min
SCREAMING_SNAKE_CASE : Any = frequency_max
SCREAMING_SNAKE_CASE : List[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase__ , min_frequency=UpperCamelCase__ , max_frequency=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , norm=UpperCamelCase__ , mel_scale='''htk''' , )
SCREAMING_SNAKE_CASE : Optional[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase__ , min_frequency=UpperCamelCase__ , max_frequency=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , norm='''slaney''' , mel_scale='''slaney''' , )
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __A ( self : Optional[int] , UpperCamelCase__ : np.array , UpperCamelCase__ : Optional[np.array] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = spectrogram(
UpperCamelCase__ , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase__ , log_mel='''dB''' , )
return log_mel_spectrogram.T
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE : int = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE : Any = [0]
# randomly choose index for each part
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.choice(ranges[0] )
SCREAMING_SNAKE_CASE : List[Any] = np.random.choice(ranges[1] )
SCREAMING_SNAKE_CASE : int = np.random.choice(ranges[2] )
SCREAMING_SNAKE_CASE : Optional[int] = mel[idx_front : idx_front + chunk_frames, :]
SCREAMING_SNAKE_CASE : Optional[Any] = mel[idx_middle : idx_middle + chunk_frames, :]
SCREAMING_SNAKE_CASE : Tuple = mel[idx_back : idx_back + chunk_frames, :]
SCREAMING_SNAKE_CASE : str = torch.tensor(mel[None, None, :] )
SCREAMING_SNAKE_CASE : Optional[int] = torch.nn.functional.interpolate(
UpperCamelCase__ , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = mel_shrink[0][0].numpy()
SCREAMING_SNAKE_CASE : Union[str, Any] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def __A ( self : Dict , UpperCamelCase__ : np.array , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
SCREAMING_SNAKE_CASE : Optional[Any] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ ) - max_length
SCREAMING_SNAKE_CASE : Dict = np.random.randint(0 , overflow + 1 )
SCREAMING_SNAKE_CASE : Optional[Any] = waveform[idx : idx + max_length]
SCREAMING_SNAKE_CASE : Any = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
SCREAMING_SNAKE_CASE : Any = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters )
                SCREAMING_SNAKE_CASE : List[str] = max_length // self.hop_length + 1 # the +1 relates to how the spectrogram is computed
SCREAMING_SNAKE_CASE : List[Any] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
SCREAMING_SNAKE_CASE : Optional[Any] = np.stack([mel, mel, mel, mel] , axis=0 )
SCREAMING_SNAKE_CASE : Tuple = False
else:
SCREAMING_SNAKE_CASE : str = self._random_mel_fusion(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = True
else:
raise NotImplementedError(f"""data_truncating {truncation} not implemented""" )
else:
SCREAMING_SNAKE_CASE : List[str] = False
        # Only use "repeat" as a new possible value for padding; repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
SCREAMING_SNAKE_CASE : Tuple = int(max_length / len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Any = np.stack(np.tile(UpperCamelCase__ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
SCREAMING_SNAKE_CASE : List[Any] = int(max_length / len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Dict = np.stack(np.tile(UpperCamelCase__ , UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Dict = np.pad(UpperCamelCase__ , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
if truncation == "fusion":
SCREAMING_SNAKE_CASE : List[Any] = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
SCREAMING_SNAKE_CASE : List[str] = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any] , UpperCamelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase__ : str = None , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , **UpperCamelCase__ : Any , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = truncation if truncation is not None else self.truncation
SCREAMING_SNAKE_CASE : List[str] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
SCREAMING_SNAKE_CASE : List[str] = isinstance(UpperCamelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
SCREAMING_SNAKE_CASE : int = is_batched_numpy or (
isinstance(UpperCamelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
            SCREAMING_SNAKE_CASE : Any = [np.asarray(speech , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCamelCase__ , np.ndarray ):
SCREAMING_SNAKE_CASE : List[Any] = np.asarray(UpperCamelCase__ , dtype=np.floataa )
elif isinstance(UpperCamelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE : Union[str, Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE : List[str] = [np.asarray(UpperCamelCase__ )]
# convert to mel spectrogram, truncate and pad if needed.
SCREAMING_SNAKE_CASE : int = [
            self._get_input_mel(waveform , max_length if max_length else self.nb_max_samples , UpperCamelCase__ , UpperCamelCase__ )
for waveform in raw_speech
]
SCREAMING_SNAKE_CASE : str = []
SCREAMING_SNAKE_CASE : List[str] = []
for mel, longer in padded_inputs:
input_mel.append(UpperCamelCase__ )
is_longer.append(UpperCamelCase__ )
if truncation == "fusion" and sum(UpperCamelCase__ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randint(0 , len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Optional[Any] = True
if isinstance(input_mel[0] , UpperCamelCase__ ):
            SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(feature , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
SCREAMING_SNAKE_CASE : Optional[Any] = [[longer] for longer in is_longer]
SCREAMING_SNAKE_CASE : Union[str, Any] = {'''input_features''': input_mel, '''is_longer''': is_longer}
SCREAMING_SNAKE_CASE : int = BatchFeature(UpperCamelCase__ )
if return_tensors is not None:
SCREAMING_SNAKE_CASE : int = input_features.convert_to_tensors(UpperCamelCase__ )
return input_features
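# --- Illustrative sketch (not part of the original module) ---
# Self-contained demo of the "repeatpad" strategy used in _get_input_mel:
# tile a short waveform until it fills max_length, then zero-pad the rest.
def _repeatpad_demo(waveform, max_length):
    n_repeat = int(max_length / len(waveform))
    tiled = np.tile(waveform, n_repeat)
    return np.pad(tiled, (0, max_length - tiled.shape[0]), mode="constant", constant_values=0)

# _repeatpad_demo(np.arange(3, dtype=np.float32), 8) -> [0. 1. 2. 0. 1. 2. 0. 0.]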
| 34
| 0
|
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = 42
class lowercase__ ( nn.Module):
def __init__( self : List[Any] , UpperCamelCase__ : int=3 , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : Union[str, Any]=("DownEncoderBlock2D",) , UpperCamelCase__ : Tuple=(64,) , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : Union[str, Any]=32 , UpperCamelCase__ : str="silu" , UpperCamelCase__ : int=True , ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : int = layers_per_block
SCREAMING_SNAKE_CASE : Dict = torch.nn.Convad(
UpperCamelCase__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
SCREAMING_SNAKE_CASE : Optional[Any] = None
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.ModuleList([] )
# down
SCREAMING_SNAKE_CASE : Union[str, Any] = block_out_channels[0]
for i, down_block_type in enumerate(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Optional[int] = output_channel
SCREAMING_SNAKE_CASE : List[str] = block_out_channels[i]
SCREAMING_SNAKE_CASE : Tuple = i == len(UpperCamelCase__ ) - 1
SCREAMING_SNAKE_CASE : Any = get_down_block(
UpperCamelCase__ , num_layers=self.layers_per_block , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
self.down_blocks.append(UpperCamelCase__ )
# mid
SCREAMING_SNAKE_CASE : Optional[int] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift='''default''' , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# out
SCREAMING_SNAKE_CASE : Any = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=UpperCamelCase__ , eps=1E-6 )
SCREAMING_SNAKE_CASE : Optional[int] = nn.SiLU()
SCREAMING_SNAKE_CASE : int = 2 * out_channels if double_z else out_channels
SCREAMING_SNAKE_CASE : int = nn.Convad(block_out_channels[-1] , UpperCamelCase__ , 3 , padding=1 )
SCREAMING_SNAKE_CASE : List[Any] = False
def __A ( self : Tuple , UpperCamelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = x
SCREAMING_SNAKE_CASE : Tuple = self.conv_in(UpperCamelCase__ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ : Union[str, Any] ):
def custom_forward(*UpperCamelCase__ : List[str] ):
return module(*UpperCamelCase__ )
return custom_forward
# down
if is_torch_version('''>=''' , '''1.11.0''' ):
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
# middle
SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ )
# middle
SCREAMING_SNAKE_CASE : Tuple = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , UpperCamelCase__ )
else:
# down
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE : str = down_block(UpperCamelCase__ )
# middle
SCREAMING_SNAKE_CASE : Optional[Any] = self.mid_block(UpperCamelCase__ )
# post-process
SCREAMING_SNAKE_CASE : Dict = self.conv_norm_out(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_act(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = self.conv_out(UpperCamelCase__ )
return sample
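# --- Illustrative sketch (not part of the original module) ---
# `create_custom_forward` exists because torch.utils.checkpoint.checkpoint
# expects a plain callable plus tensor arguments. A minimal, hypothetical
# demo of the recompute-in-backward pattern used above:
def _checkpoint_demo():
    import torch.utils.checkpoint

    layer = nn.Linear(4, 4)

    def create_custom_forward(module):
        def custom_forward(*inputs):
            return module(*inputs)

        return custom_forward

    x = torch.randn(2, 4, requires_grad=True)
    out = torch.utils.checkpoint.checkpoint(create_custom_forward(layer), x, use_reentrant=False)
    out.sum().backward()  # activations are recomputed during the backward pass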
class lowercase__ ( nn.Module):
def __init__( self : List[str] , UpperCamelCase__ : int=3 , UpperCamelCase__ : Optional[Any]=3 , UpperCamelCase__ : Any=("UpDecoderBlock2D",) , UpperCamelCase__ : Union[str, Any]=(64,) , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : Optional[int]=32 , UpperCamelCase__ : List[Any]="silu" , UpperCamelCase__ : Optional[Any]="group" , ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Dict = layers_per_block
SCREAMING_SNAKE_CASE : List[Any] = nn.Convad(
UpperCamelCase__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : Tuple = nn.ModuleList([] )
SCREAMING_SNAKE_CASE : int = in_channels if norm_type == '''spatial''' else None
# mid
SCREAMING_SNAKE_CASE : Any = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift='''default''' if norm_type == '''group''' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# up
SCREAMING_SNAKE_CASE : int = list(reversed(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : List[Any] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[str] = output_channel
SCREAMING_SNAKE_CASE : Optional[int] = reversed_block_out_channels[i]
SCREAMING_SNAKE_CASE : int = i == len(UpperCamelCase__ ) - 1
SCREAMING_SNAKE_CASE : Dict = get_up_block(
UpperCamelCase__ , num_layers=self.layers_per_block + 1 , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , prev_output_channel=UpperCamelCase__ , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , resnet_time_scale_shift=UpperCamelCase__ , )
self.up_blocks.append(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = output_channel
# out
if norm_type == "spatial":
SCREAMING_SNAKE_CASE : List[str] = SpatialNorm(block_out_channels[0] , UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : List[Any] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=UpperCamelCase__ , eps=1E-6 )
SCREAMING_SNAKE_CASE : List[Any] = nn.SiLU()
SCREAMING_SNAKE_CASE : str = nn.Convad(block_out_channels[0] , UpperCamelCase__ , 3 , padding=1 )
SCREAMING_SNAKE_CASE : Any = False
def __A ( self : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : Dict=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = z
SCREAMING_SNAKE_CASE : Any = self.conv_in(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ : Optional[int] ):
def custom_forward(*UpperCamelCase__ : Dict ):
return module(*UpperCamelCase__ )
return custom_forward
if is_torch_version('''>=''' , '''1.11.0''' ):
# middle
SCREAMING_SNAKE_CASE : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
# middle
SCREAMING_SNAKE_CASE : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Tuple = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ )
else:
# middle
SCREAMING_SNAKE_CASE : Optional[Any] = self.mid_block(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Any = up_block(UpperCamelCase__ , UpperCamelCase__ )
# post-process
if latent_embeds is None:
SCREAMING_SNAKE_CASE : str = self.conv_norm_out(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : Dict = self.conv_norm_out(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = self.conv_act(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_out(UpperCamelCase__ )
return sample
class lowercase__ ( nn.Module):
def __init__( self : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Tuple="random" , UpperCamelCase__ : Dict=False , UpperCamelCase__ : Optional[Any]=True ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Optional[Any] = n_e
SCREAMING_SNAKE_CASE : Tuple = vq_embed_dim
SCREAMING_SNAKE_CASE : Union[str, Any] = beta
SCREAMING_SNAKE_CASE : List[str] = legacy
SCREAMING_SNAKE_CASE : Dict = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
SCREAMING_SNAKE_CASE : str = remap
if self.remap is not None:
self.register_buffer('''used''' , torch.tensor(np.load(self.remap ) ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.used.shape[0]
SCREAMING_SNAKE_CASE : List[str] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
SCREAMING_SNAKE_CASE : List[str] = self.re_embed
SCREAMING_SNAKE_CASE : Tuple = self.re_embed + 1
print(
f"""Remapping {self.n_e} indices to {self.re_embed} indices. """
f"""Using {self.unknown_index} for unknown indices.""" )
else:
SCREAMING_SNAKE_CASE : Dict = n_e
SCREAMING_SNAKE_CASE : int = sane_index_shape
def __A ( self : Tuple , UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = inds.shape
assert len(UpperCamelCase__ ) > 1
SCREAMING_SNAKE_CASE : Optional[int] = inds.reshape(ishape[0] , -1 )
SCREAMING_SNAKE_CASE : int = self.used.to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = (inds[:, :, None] == used[None, None, ...]).long()
SCREAMING_SNAKE_CASE : Union[str, Any] = match.argmax(-1 )
SCREAMING_SNAKE_CASE : List[Any] = match.sum(2 ) < 1
if self.unknown_index == "random":
SCREAMING_SNAKE_CASE : Optional[Any] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
SCREAMING_SNAKE_CASE : str = self.unknown_index
return new.reshape(UpperCamelCase__ )
def __A ( self : List[str] , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = inds.shape
assert len(UpperCamelCase__ ) > 1
SCREAMING_SNAKE_CASE : List[Any] = inds.reshape(ishape[0] , -1 )
SCREAMING_SNAKE_CASE : Optional[Any] = self.used.to(UpperCamelCase__ )
if self.re_embed > self.used.shape[0]: # extra token
SCREAMING_SNAKE_CASE : Any = 0 # simply set to zero
SCREAMING_SNAKE_CASE : Any = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , UpperCamelCase__ )
return back.reshape(UpperCamelCase__ )
def __A ( self : int , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = z.permute(0 , 2 , 3 , 1 ).contiguous()
SCREAMING_SNAKE_CASE : Any = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
SCREAMING_SNAKE_CASE : Dict = torch.argmin(torch.cdist(UpperCamelCase__ , self.embedding.weight ) , dim=1 )
SCREAMING_SNAKE_CASE : List[Any] = self.embedding(UpperCamelCase__ ).view(z.shape )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : Any = None
# compute loss for embedding
if not self.legacy:
SCREAMING_SNAKE_CASE : Optional[int] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
SCREAMING_SNAKE_CASE : List[Any] = z + (z_q - z).detach()
# reshape back to match original input shape
SCREAMING_SNAKE_CASE : Optional[Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
SCREAMING_SNAKE_CASE : Optional[Any] = self.remap_to_used(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
SCREAMING_SNAKE_CASE : List[Any] = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __A ( self : Union[str, Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any ):
'''simple docstring'''
if self.remap is not None:
SCREAMING_SNAKE_CASE : Any = indices.reshape(shape[0] , -1 ) # add batch axis
SCREAMING_SNAKE_CASE : List[str] = self.unmap_to_all(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
SCREAMING_SNAKE_CASE : Any = self.embedding(UpperCamelCase__ )
if shape is not None:
SCREAMING_SNAKE_CASE : List[Any] = z_q.view(UpperCamelCase__ )
# reshape back to match original input shape
SCREAMING_SNAKE_CASE : Dict = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
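# --- Illustrative sketch (not part of the original module) ---
# Core of the quantization step above: snap each latent vector to its
# nearest codebook entry, then use the straight-through trick
# `z + (z_q - z).detach()` so gradients still reach the encoder output.
def _vq_demo():
    codebook = torch.randn(16, 8)  # 16 entries of dimension 8
    z = torch.randn(5, 8, requires_grad=True)
    indices = torch.argmin(torch.cdist(z, codebook), dim=1)
    z_q = codebook[indices]
    z_q = z + (z_q - z).detach()  # straight-through estimator
    z_q.sum().backward()  # gradients flow to z despite the hard snap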
class lowercase__ ( UpperCamelCase_):
def __init__( self : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str]=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = parameters
SCREAMING_SNAKE_CASE : Dict = torch.chunk(UpperCamelCase__ , 2 , dim=1 )
SCREAMING_SNAKE_CASE : Any = torch.clamp(self.logvar , -30.0 , 20.0 )
SCREAMING_SNAKE_CASE : Optional[int] = deterministic
SCREAMING_SNAKE_CASE : Optional[Any] = torch.exp(0.5 * self.logvar )
SCREAMING_SNAKE_CASE : Tuple = torch.exp(self.logvar )
if self.deterministic:
SCREAMING_SNAKE_CASE : int = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def __A ( self : Any , UpperCamelCase__ : Optional[torch.Generator] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = randn_tensor(
self.mean.shape , generator=UpperCamelCase__ , device=self.parameters.device , dtype=self.parameters.dtype )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.mean + self.std * sample
return x
def __A ( self : str , UpperCamelCase__ : str=None ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def __A ( self : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : str=[1, 2, 3] ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
SCREAMING_SNAKE_CASE : Optional[Any] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=UpperCamelCase__ )
def __A ( self : List[str] ):
'''simple docstring'''
return self.mean
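# --- Illustrative sketch (not part of the original module) ---
# The `kl` method above computes, per element,
#   KL(N(mu, sigma^2) || N(0, 1)) = 0.5 * (mu^2 + sigma^2 - 1 - log sigma^2),
# summed over the non-batch dimensions. A hypothetical numeric check:
def _kl_demo():
    mean = torch.zeros(1, 2, 2, 2)
    logvar = torch.zeros(1, 2, 2, 2)  # i.e. unit variance
    kl = 0.5 * torch.sum(mean.pow(2) + torch.exp(logvar) - 1.0 - logvar, dim=[1, 2, 3])
    assert torch.allclose(kl, torch.zeros(1))  # N(0, 1) vs N(0, 1) has zero KL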
| 701
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : str = {
'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json',
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """layoutlmv3"""
def __init__( self : Optional[int] , UpperCamelCase__ : Union[str, Any]=5_0265 , UpperCamelCase__ : List[Any]=768 , UpperCamelCase__ : Union[str, Any]=12 , UpperCamelCase__ : Union[str, Any]=12 , UpperCamelCase__ : Tuple=3072 , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Any=512 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : List[Any]=0.02 , UpperCamelCase__ : Optional[Any]=1E-5 , UpperCamelCase__ : List[str]=1 , UpperCamelCase__ : int=0 , UpperCamelCase__ : str=2 , UpperCamelCase__ : List[str]=1024 , UpperCamelCase__ : str=128 , UpperCamelCase__ : str=128 , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Optional[int]=32 , UpperCamelCase__ : Any=128 , UpperCamelCase__ : Optional[Any]=64 , UpperCamelCase__ : Dict=256 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Dict=224 , UpperCamelCase__ : Optional[Any]=3 , UpperCamelCase__ : Any=16 , UpperCamelCase__ : Any=None , **UpperCamelCase__ : Optional[Any] , ):
'''simple docstring'''
super().__init__(
vocab_size=UpperCamelCase__ , hidden_size=UpperCamelCase__ , num_hidden_layers=UpperCamelCase__ , num_attention_heads=UpperCamelCase__ , intermediate_size=UpperCamelCase__ , hidden_act=UpperCamelCase__ , hidden_dropout_prob=UpperCamelCase__ , attention_probs_dropout_prob=UpperCamelCase__ , max_position_embeddings=UpperCamelCase__ , type_vocab_size=UpperCamelCase__ , initializer_range=UpperCamelCase__ , layer_norm_eps=UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = max_ad_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = coordinate_size
SCREAMING_SNAKE_CASE : List[str] = shape_size
SCREAMING_SNAKE_CASE : Optional[int] = has_relative_attention_bias
SCREAMING_SNAKE_CASE : List[Any] = rel_pos_bins
SCREAMING_SNAKE_CASE : str = max_rel_pos
SCREAMING_SNAKE_CASE : Any = has_spatial_attention_bias
SCREAMING_SNAKE_CASE : Union[str, Any] = rel_ad_pos_bins
SCREAMING_SNAKE_CASE : Union[str, Any] = max_rel_ad_pos
SCREAMING_SNAKE_CASE : Union[str, Any] = text_embed
SCREAMING_SNAKE_CASE : List[str] = visual_embed
SCREAMING_SNAKE_CASE : Optional[Any] = input_size
SCREAMING_SNAKE_CASE : Optional[Any] = num_channels
SCREAMING_SNAKE_CASE : List[Any] = patch_size
SCREAMING_SNAKE_CASE : List[Any] = classifier_dropout
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = version.parse("""1.12""")
@property
def __A ( self : str ):
'''simple docstring'''
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
def __A ( self : int ):
'''simple docstring'''
return 1E-5
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return 12
def __A ( self : Optional[Any] , UpperCamelCase__ : "ProcessorMixin" , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional["TensorType"] = None , UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : int = 40 , ):
'''simple docstring'''
setattr(processor.image_processor , '''apply_ocr''' , UpperCamelCase__ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : Any = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : List[Any] = processor.tokenizer.num_special_tokens_to_add(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE : Union[str, Any] = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
SCREAMING_SNAKE_CASE : Any = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_images(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = dict(
processor(
UpperCamelCase__ , text=UpperCamelCase__ , boxes=UpperCamelCase__ , return_tensors=UpperCamelCase__ , ) )
return inputs
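# --- Illustrative sketch (not part of the original module) ---
# `compute_effective_axis_dimension` replaces a dynamic axis (-1) with a small
# fixed size so the ONNX exporter cannot constant-fold the graph around it.
# A hypothetical reimplementation of that behaviour:
def _effective_axis_dimension(dimension, fixed_dimension, num_token_to_add=0):
    if dimension <= 0:  # a dynamic axis was requested
        dimension = fixed_dimension
    return dimension - num_token_to_add

# _effective_axis_dimension(-1, fixed_dimension=2) -> 2 (the axis stays dynamic at export time)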
| 34
| 0
|
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = BarthezTokenizer
UpperCamelCase_ = BarthezTokenizerFast
UpperCamelCase_ = True
UpperCamelCase_ = True
def __A ( self : Dict ):
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE : Tuple = BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = tokenizer
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = '''<pad>'''
SCREAMING_SNAKE_CASE : str = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(UpperCamelCase__ ) , 10_1122 )
def __A ( self : Optional[Any] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
@require_torch
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
SCREAMING_SNAKE_CASE : List[str] = [0, 57, 3018, 7_0307, 91, 2]
SCREAMING_SNAKE_CASE : Any = self.tokenizer(
UpperCamelCase__ , max_length=len(UpperCamelCase__ ) , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors='''pt''' )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
SCREAMING_SNAKE_CASE : List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : List[Any] ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : List[str] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE : int = '''I was born in 92000, and this is falsé.'''
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.tokenize(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = rust_tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = rust_tokenizer.encode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
@slow
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = {'''input_ids''': [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # moussaKam/mbarthez is a French model, so we also use French texts.
SCREAMING_SNAKE_CASE : List[str] = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=UpperCamelCase__ , )
| 702
|
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = FunnelTokenizer
UpperCamelCase_ = FunnelTokenizerFast
UpperCamelCase_ = True
UpperCamelCase_ = True
def __A ( self : Union[str, Any] ):
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE : Optional[Any] = [
'''<unk>''',
'''<cls>''',
'''<sep>''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __A ( self : int , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return FunnelTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self : int , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self : Any , UpperCamelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = '''UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE : int = '''unwanted, running'''
return input_text, output_text
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE : int = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(UpperCamelCase__ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [7, 4, 5, 10, 8, 9] )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_tokenizers(do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
SCREAMING_SNAKE_CASE : int = tokenizer('''UNwant\u00E9d,running''' )
SCREAMING_SNAKE_CASE : Optional[Any] = len(inputs['''input_ids'''] ) - 1
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer('''UNwant\u00E9d,running''' , '''UNwant\u00E9d,running''' )
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len + [1] * sentence_len )
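# --- Illustrative sketch (not part of the original test) ---
# The "un ##want ##ed" split asserted above comes from greedy
# longest-match-first WordPiece tokenization. A minimal, hypothetical
# version of that matching loop:
def _wordpiece(word, vocab, unk="<unk>"):
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start:
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:
                pieces.append(piece)
                break
            end -= 1
        else:
            return [unk]  # nothing in the vocab matches at this position
        start = end
    return pieces

# _wordpiece("unwanted", {"un", "##want", "##ed"}) -> ["un", "##want", "##ed"]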
| 34
| 0
|
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : Optional[Any] = {
'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """umt5"""
UpperCamelCase_ = ["""past_key_values"""]
def __init__( self : Optional[Any] , UpperCamelCase__ : Dict=25_0112 , UpperCamelCase__ : List[str]=512 , UpperCamelCase__ : Tuple=64 , UpperCamelCase__ : Any=1024 , UpperCamelCase__ : Optional[Any]=8 , UpperCamelCase__ : Dict=None , UpperCamelCase__ : List[Any]=6 , UpperCamelCase__ : Dict=32 , UpperCamelCase__ : Union[str, Any]=128 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Optional[int]=1E-6 , UpperCamelCase__ : List[str]=1.0 , UpperCamelCase__ : Dict="gated-gelu" , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Any="T5Tokenizer" , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Union[str, Any]=0 , UpperCamelCase__ : str=1 , UpperCamelCase__ : List[Any]=0 , **UpperCamelCase__ : List[str] , ):
'''simple docstring'''
super().__init__(
is_encoder_decoder=UpperCamelCase__ , tokenizer_class=UpperCamelCase__ , tie_word_embeddings=UpperCamelCase__ , pad_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , decoder_start_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE : Any = d_model
SCREAMING_SNAKE_CASE : Optional[int] = d_kv
SCREAMING_SNAKE_CASE : List[Any] = d_ff
SCREAMING_SNAKE_CASE : Dict = num_layers
SCREAMING_SNAKE_CASE : List[Any] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
SCREAMING_SNAKE_CASE : int = num_heads
SCREAMING_SNAKE_CASE : str = relative_attention_num_buckets
SCREAMING_SNAKE_CASE : Optional[int] = relative_attention_max_distance
SCREAMING_SNAKE_CASE : int = dropout_rate
SCREAMING_SNAKE_CASE : Tuple = layer_norm_epsilon
SCREAMING_SNAKE_CASE : Any = initializer_factor
SCREAMING_SNAKE_CASE : Optional[int] = feed_forward_proj
SCREAMING_SNAKE_CASE : Tuple = use_cache
SCREAMING_SNAKE_CASE : str = self.feed_forward_proj.split('''-''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = act_info[-1]
SCREAMING_SNAKE_CASE : int = act_info[0] == '''gated'''
if len(UpperCamelCase__ ) > 1 and act_info[0] != "gated" or len(UpperCamelCase__ ) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
if feed_forward_proj == "gated-gelu":
SCREAMING_SNAKE_CASE : Optional[int] = '''gelu_new'''
@property
def __A ( self : Dict ):
'''simple docstring'''
return self.d_model
@property
def __A ( self : str ):
'''simple docstring'''
return self.num_heads
@property
def __A ( self : Optional[int] ):
'''simple docstring'''
return self.num_layers
class lowercase__ ( UpperCamelCase_):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''},
'''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''},
}
if self.use_past:
SCREAMING_SNAKE_CASE : List[Any] = '''past_encoder_sequence + sequence'''
SCREAMING_SNAKE_CASE : Dict = {0: '''batch'''}
SCREAMING_SNAKE_CASE : Tuple = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
SCREAMING_SNAKE_CASE : List[str] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase__ , direction='''inputs''' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def __A ( self : Dict ):
'''simple docstring'''
return 13
@property
def __A ( self : Optional[int] ):
'''simple docstring'''
return 5E-4
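# --- Illustrative sketch (not part of the original module) ---
# The feed_forward_proj parsing in __init__ above, shown standalone:
# "gated-gelu" splits into a gating flag plus a base activation, and is
# further mapped to the "gelu_new" implementation.
def _parse_feed_forward_proj(feed_forward_proj):
    act_info = feed_forward_proj.split("-")
    dense_act_fn = act_info[-1]
    is_gated_act = act_info[0] == "gated"
    if feed_forward_proj == "gated-gelu":
        dense_act_fn = "gelu_new"
    return dense_act_fn, is_gated_act

# _parse_feed_forward_proj("gated-gelu") -> ("gelu_new", True)
# _parse_feed_forward_proj("relu")       -> ("relu", False)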
| 703
|
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class lowercase__ ( UpperCamelCase_):
def __init__( self : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = dataset
SCREAMING_SNAKE_CASE : Optional[Any] = process
SCREAMING_SNAKE_CASE : Union[str, Any] = params
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : List[str] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.dataset[i]
SCREAMING_SNAKE_CASE : Optional[int] = self.process(UpperCamelCase__ , **self.params )
return processed
class lowercase__ ( UpperCamelCase_):
def __init__( self : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any]=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = loader
SCREAMING_SNAKE_CASE : List[Any] = infer
SCREAMING_SNAKE_CASE : int = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : List[str] = loader_batch_size
# Internal bookkeeping
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : int = None
def __len__( self : int ):
'''simple docstring'''
return len(self.loader )
def __iter__( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = iter(self.loader )
return self
def __A ( self : List[str] ):
'''simple docstring'''
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
SCREAMING_SNAKE_CASE : Optional[Any] = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
for k, element in self._loader_batch_data.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
# Convert ModelOutput to tuple first
SCREAMING_SNAKE_CASE : Dict = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE : Dict = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(UpperCamelCase__ , UpperCamelCase__ ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
SCREAMING_SNAKE_CASE : List[str] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
SCREAMING_SNAKE_CASE : int = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
SCREAMING_SNAKE_CASE : Union[str, Any] = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
SCREAMING_SNAKE_CASE : Tuple = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
SCREAMING_SNAKE_CASE : Tuple = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
SCREAMING_SNAKE_CASE : Any = self._loader_batch_data.__class__(UpperCamelCase__ )
self._loader_batch_index += 1
return result
def __A ( self : Union[str, Any] ):
'''simple docstring'''
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
SCREAMING_SNAKE_CASE : Tuple = next(self.iterator )
SCREAMING_SNAKE_CASE : List[Any] = self.infer(UpperCamelCase__ , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(UpperCamelCase__ , torch.Tensor ):
SCREAMING_SNAKE_CASE : Optional[int] = processed
else:
SCREAMING_SNAKE_CASE : int = list(processed.keys() )[0]
SCREAMING_SNAKE_CASE : int = processed[key]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : Dict = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
SCREAMING_SNAKE_CASE : List[Any] = observed_batch_size
# Setting internal index to unwrap the batch
SCREAMING_SNAKE_CASE : List[Any] = processed
SCREAMING_SNAKE_CASE : int = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
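# --- Illustrative sketch (not part of the original module) ---
# The unrolling above turns one batched model output into loader_batch_size
# per-item results that each look like batch_size=1. The same idea on a
# plain tensor, self-contained:
def _unroll_demo():
    batch = torch.arange(6).reshape(3, 2)  # one forward pass over 3 items
    items = [batch[i].unsqueeze(0) for i in range(batch.shape[0])]
    assert all(item.shape == (1, 2) for item in items)  # each looks like batch_size=1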
class lowercase__ ( UpperCamelCase_):
def __init__( self : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any]=None ):
'''simple docstring'''
super().__init__(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __iter__( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = iter(self.loader )
SCREAMING_SNAKE_CASE : List[Any] = None
return self
def __A ( self : List[str] ):
'''simple docstring'''
if self.subiterator is None:
SCREAMING_SNAKE_CASE : Dict = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
SCREAMING_SNAKE_CASE : Any = next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item.
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and have been iterated over.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
SCREAMING_SNAKE_CASE : Optional[Any] = self.infer(next(self.iterator ) , **self.params )
SCREAMING_SNAKE_CASE : Union[str, Any] = next(self.subiterator )
return processed
class lowercase__ ( UpperCamelCase_):
def __iter__( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = iter(self.loader )
return self
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Optional[int] = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
SCREAMING_SNAKE_CASE : Tuple = self.loader_batch_item()
SCREAMING_SNAKE_CASE : Any = item.pop('''is_last''' )
accumulator.append(UpperCamelCase__ )
if is_last:
return accumulator
while not is_last:
SCREAMING_SNAKE_CASE : Any = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(UpperCamelCase__ , torch.Tensor ):
SCREAMING_SNAKE_CASE : Tuple = processed
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = list(processed.keys() )[0]
SCREAMING_SNAKE_CASE : List[str] = processed[key]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : int = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
SCREAMING_SNAKE_CASE : List[str] = observed_batch_size
SCREAMING_SNAKE_CASE : List[Any] = processed
SCREAMING_SNAKE_CASE : str = 0
while self._loader_batch_index < self.loader_batch_size:
SCREAMING_SNAKE_CASE : Any = self.loader_batch_item()
SCREAMING_SNAKE_CASE : List[Any] = item.pop('''is_last''' )
accumulator.append(UpperCamelCase__ )
if is_last:
return accumulator
else:
SCREAMING_SNAKE_CASE : int = processed
SCREAMING_SNAKE_CASE : List[str] = item.pop('''is_last''' )
accumulator.append(UpperCamelCase__ )
return accumulator
class lowercase__ ( UpperCamelCase_):
def __init__( self : Optional[Any] , UpperCamelCase__ : Dataset , UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = dataset
SCREAMING_SNAKE_CASE : Dict = key
def __len__( self : Optional[int] ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : Dict , UpperCamelCase__ : Tuple ):
'''simple docstring'''
return self.dataset[i][self.key]
class lowercase__ ( UpperCamelCase_):
def __init__( self : List[Any] , UpperCamelCase__ : Dataset , UpperCamelCase__ : str , UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = dataset
SCREAMING_SNAKE_CASE : List[str] = keya
SCREAMING_SNAKE_CASE : Tuple = keya
def __len__( self : List[str] ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : Union[str, Any] , UpperCamelCase__ : Any ):
'''simple docstring'''
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 34
| 0
|
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
__UpperCamelCase : Tuple = re.compile(R'^(?P<major>\d+)' R'\.(?P<minor>\d+)' R'\.(?P<patch>\d+)$')
@total_ordering
@dataclass
class lowercase__ :
UpperCamelCase_ = 42
UpperCamelCase_ = None
UpperCamelCase_ = None
UpperCamelCase_ = None
UpperCamelCase_ = None
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = _str_to_version_tuple(self.version_str )
def __repr__( self : List[str] ):
'''simple docstring'''
return f"""{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"""
@property
def __A ( self : Tuple ):
'''simple docstring'''
return self.major, self.minor, self.patch
def __A ( self : Any , UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
return Version(UpperCamelCase__ )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
return other
raise TypeError(f"""{other} (type {type(UpperCamelCase__ )}) cannot be compared to version.""" )
def __eq__( self : Union[str, Any] , UpperCamelCase__ : Any ):
'''simple docstring'''
try:
SCREAMING_SNAKE_CASE : List[str] = self._validate_operand(UpperCamelCase__ )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self : int , UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self._validate_operand(UpperCamelCase__ )
return self.tuple < other.tuple
def __hash__( self : str ):
'''simple docstring'''
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def __A ( cls : Any , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def __A ( self : Tuple ):
'''simple docstring'''
return self.version_str
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = _VERSION_REG.match(_lowercase )
if not res:
raise ValueError(f"""Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.""" )
    return tuple(int(v ) for v in [res.group('''major''' ), res.group('''minor''' ), res.group('''patch''' )] )
def A ( _lowercase ):
return ".".join(str(_lowercase ) for v in version_tuple )
| 704
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : Optional[Any] = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """deberta-v2"""
def __init__( self : Optional[Any] , UpperCamelCase__ : Any=12_8100 , UpperCamelCase__ : Optional[int]=1536 , UpperCamelCase__ : Dict=24 , UpperCamelCase__ : List[str]=24 , UpperCamelCase__ : Tuple=6144 , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Optional[int]=512 , UpperCamelCase__ : Optional[Any]=0 , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : List[Any]=1E-7 , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : str=-1 , UpperCamelCase__ : List[Any]=0 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Tuple=0 , UpperCamelCase__ : str="gelu" , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : int = num_hidden_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Any = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : Dict = type_vocab_size
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Optional[Any] = relative_attention
SCREAMING_SNAKE_CASE : Optional[Any] = max_relative_positions
SCREAMING_SNAKE_CASE : Optional[int] = pad_token_id
SCREAMING_SNAKE_CASE : Optional[Any] = position_biased_input
# Backwards compatibility
if type(UpperCamelCase__ ) == str:
SCREAMING_SNAKE_CASE : Optional[int] = [x.strip() for x in pos_att_type.lower().split('''|''' )]
SCREAMING_SNAKE_CASE : Any = pos_att_type
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE : Tuple = kwargs.get('''pooler_hidden_size''' , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = pooler_dropout
SCREAMING_SNAKE_CASE : Union[str, Any] = pooler_hidden_act
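# Illustrative check: the backwards-compatibility branch above splits a legacy string
# such as "p2c|c2p" into a list of attention types.
assert [x.strip() for x in 'p2c|c2p'.lower().split('|')] == ['p2c', 'c2p']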
class lowercase__ ( UpperCamelCase_):
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Optional[int] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis)] )
else:
return OrderedDict([('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis)] )
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return 12
def __A ( self : Dict , UpperCamelCase__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional["TensorType"] = None , UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : "PreTrainedTokenizerBase" = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = super().generate_dummy_inputs(preprocessor=UpperCamelCase__ , framework=UpperCamelCase__ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 34
| 0
|
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Optional[Any] = []
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
f"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
f"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
f"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
f"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def A ( _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : Optional[Any] = []
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", f"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", f"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", f"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", f"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : int = []
token.append((f"""cvt.encoder.stages.{idx}.cls_token""", '''stage2.cls_token''') )
return token
def A ( ):
SCREAMING_SNAKE_CASE : Any = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
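# Illustrative, standalone sketch (hypothetical name): each helper above returns
# (hf_key, original_key) pairs, so remapping a checkpoint reduces to a dict
# comprehension, mirroring the loop in the conversion function below.
def _demo_remap(original_weights, rename_pairs):
    return {hf_key: original_weights[orig_key] for hf_key, orig_key in rename_pairs}
assert _demo_remap({'norm.weight': 1}, [('layernorm.weight', 'norm.weight')]) == {'layernorm.weight': 1}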
def A ( _lowercase , _lowercase , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : List[str] = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE : str = 1_000
SCREAMING_SNAKE_CASE : Optional[Any] = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE : List[Any] = num_labels
SCREAMING_SNAKE_CASE : List[str] = json.load(open(cached_download(hf_hub_url(_lowercase , _lowercase , repo_type='''dataset''' ) ) , '''r''' ) )
    SCREAMING_SNAKE_CASE : Tuple = {int(k ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : List[Any] = idalabel
SCREAMING_SNAKE_CASE : Any = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : int = CvtConfig(num_labels=_lowercase , idalabel=_lowercase , labelaid=_lowercase )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13":
SCREAMING_SNAKE_CASE : int = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21":
SCREAMING_SNAKE_CASE : Dict = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
SCREAMING_SNAKE_CASE : Any = [2, 2, 20]
SCREAMING_SNAKE_CASE : List[str] = [3, 12, 16]
SCREAMING_SNAKE_CASE : int = [192, 768, 1_024]
SCREAMING_SNAKE_CASE : Any = CvtForImageClassification(_lowercase )
SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
SCREAMING_SNAKE_CASE : Any = image_size
SCREAMING_SNAKE_CASE : Any = torch.load(_lowercase , map_location=torch.device('''cpu''' ) )
SCREAMING_SNAKE_CASE : str = OrderedDict()
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
SCREAMING_SNAKE_CASE : List[str] = list_of_state_dict + cls_token(_lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = list_of_state_dict + embeddings(_lowercase )
for cnt in range(config.depth[idx] ):
SCREAMING_SNAKE_CASE : List[Any] = list_of_state_dict + attention(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE : Any = list_of_state_dict + final()
for gg in list_of_state_dict:
print(_lowercase )
for i in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE : Tuple = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(_lowercase )
model.save_pretrained(_lowercase )
image_processor.save_pretrained(_lowercase )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
__UpperCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=R'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
        help='Path to the original CvT checkpoint (.pth) file.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__UpperCamelCase : Tuple = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 705
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : str = logging.get_logger(__name__)
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Any = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE : Any = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE : Any = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type='''dataset''' ) , '''r''' ) )
    SCREAMING_SNAKE_CASE : Union[str, Any] = {int(k ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Dict = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : str = '''std_conv''' if '''bit''' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
SCREAMING_SNAKE_CASE : Optional[int] = BitConfig(
conv_layer=_lowercase , num_labels=1_000 , idalabel=_lowercase , labelaid=_lowercase , )
return config
def A ( _lowercase ):
if "stem.conv" in name:
SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
if "blocks" in name:
SCREAMING_SNAKE_CASE : Tuple = name.replace('''blocks''' , '''layers''' )
if "head.fc" in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''head.fc''' , '''classifier.1''' )
if name.startswith('''norm''' ):
SCREAMING_SNAKE_CASE : str = '''bit.''' + name
if "bit" not in name and "classifier" not in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = '''bit.encoder.''' + name
return name
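# Illustrative, standalone restatement of the renaming above (hypothetical name), so a
# single checkpoint key can be traced end to end:
def _demo_rename(name):
    name = name.replace('stem.conv', 'bit.embedder.convolution')
    name = name.replace('blocks', 'layers')
    name = name.replace('head.fc', 'classifier.1')
    if name.startswith('norm'):
        name = 'bit.' + name
    if 'bit' not in name and 'classifier' not in name:
        name = 'bit.encoder.' + name
    return name
assert _demo_rename('stem.conv.weight') == 'bit.embedder.convolution.weight'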
def A ( ):
SCREAMING_SNAKE_CASE : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE : List[str] = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return im
@torch.no_grad()
def A ( _lowercase , _lowercase , _lowercase=False ):
SCREAMING_SNAKE_CASE : List[Any] = get_config(_lowercase )
# load original model from timm
SCREAMING_SNAKE_CASE : Optional[Any] = create_model(_lowercase , pretrained=_lowercase )
timm_model.eval()
# load state_dict of original model
SCREAMING_SNAKE_CASE : Optional[int] = timm_model.state_dict()
for key in state_dict.copy().keys():
        SCREAMING_SNAKE_CASE : Dict = state_dict.pop(key )
SCREAMING_SNAKE_CASE : Optional[int] = val.squeeze() if '''head''' in key else val
# load HuggingFace model
SCREAMING_SNAKE_CASE : str = BitForImageClassification(_lowercase )
model.eval()
model.load_state_dict(_lowercase )
# create image processor
SCREAMING_SNAKE_CASE : Optional[Any] = create_transform(**resolve_data_config({} , model=_lowercase ) )
SCREAMING_SNAKE_CASE : List[str] = transform.transforms
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
SCREAMING_SNAKE_CASE : Tuple = BitImageProcessor(
do_resize=_lowercase , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_lowercase , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=_lowercase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
SCREAMING_SNAKE_CASE : Any = prepare_img()
SCREAMING_SNAKE_CASE : Union[str, Any] = transform(_lowercase ).unsqueeze(0 )
SCREAMING_SNAKE_CASE : Optional[int] = processor(_lowercase , return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(_lowercase , _lowercase )
# verify logits
with torch.no_grad():
SCREAMING_SNAKE_CASE : Dict = model(_lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = outputs.logits
print('''Logits:''' , logits[0, :3] )
print('''Predicted class:''' , model.config.idalabel[logits.argmax(-1 ).item()] )
SCREAMING_SNAKE_CASE : List[Any] = timm_model(_lowercase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowercase , outputs.logits , atol=1e-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(_lowercase ).mkdir(exist_ok=_lowercase )
print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
processor.save_pretrained(_lowercase )
if push_to_hub:
print(f"""Pushing model {model_name} and processor to the hub""" )
model.push_to_hub(f"""ybelkada/{model_name}""" )
processor.push_to_hub(f"""ybelkada/{model_name}""" )
if __name__ == "__main__":
__UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
__UpperCamelCase : Optional[int] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 34
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__UpperCamelCase : Optional[Any] = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : int = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
__UpperCamelCase : Optional[int] = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
__UpperCamelCase : Any = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
__UpperCamelCase : List[str] = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 706
|
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
__UpperCamelCase : str = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
__UpperCamelCase : int = logging.getLogger()
def A ( ):
SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
parser.add_argument('''-f''' )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
return args.f
def A ( _lowercase , _lowercase="eval" ):
SCREAMING_SNAKE_CASE : Dict = os.path.join(_lowercase , f"""{split}_results.json""" )
if os.path.exists(_lowercase ):
with open(_lowercase , '''r''' ) as f:
return json.load(_lowercase )
raise ValueError(f"""can't find {path}""" )
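# Note (illustrative): each example script patched in the tests below writes
# "{split}_results.json" into its output directory; the helper above loads that file so
# the tests can assert on metrics such as result['eval_accuracy'].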
__UpperCamelCase : Optional[Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowercase__ ( UpperCamelCase_):
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Tuple = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_flax_glue.main()
SCREAMING_SNAKE_CASE : Union[str, Any] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : str = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_clm_flax.main()
SCREAMING_SNAKE_CASE : Dict = get_results(UpperCamelCase__ )
self.assertLess(result['''eval_perplexity'''] , 100 )
@slow
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Union[str, Any] = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_summarization_flax.main()
SCREAMING_SNAKE_CASE : Union[str, Any] = get_results(UpperCamelCase__ , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Dict = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_mlm_flax.main()
SCREAMING_SNAKE_CASE : List[Any] = get_results(UpperCamelCase__ )
self.assertLess(result['''eval_perplexity'''] , 42 )
@slow
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Union[str, Any] = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_ta_mlm_flax.main()
SCREAMING_SNAKE_CASE : Optional[int] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 7 if get_gpu_count() > 1 else 2
SCREAMING_SNAKE_CASE : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Any = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_flax_ner.main()
SCREAMING_SNAKE_CASE : List[str] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Union[str, Any] = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_qa.main()
SCREAMING_SNAKE_CASE : str = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_f1'''] , 30 )
self.assertGreaterEqual(result['''eval_exact'''] , 30 )
| 34
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : int = {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """roformer"""
def __init__( self : Dict , UpperCamelCase__ : List[str]=5_0000 , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : int=768 , UpperCamelCase__ : Tuple=12 , UpperCamelCase__ : Optional[int]=12 , UpperCamelCase__ : Optional[Any]=3072 , UpperCamelCase__ : List[str]="gelu" , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Dict=1536 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : List[str]=0.02 , UpperCamelCase__ : int=1E-12 , UpperCamelCase__ : List[str]=0 , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : Optional[Any]=True , **UpperCamelCase__ : Dict , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__ , **UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = vocab_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size if embedding_size is None else embedding_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE : Any = num_hidden_layers
SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = hidden_act
SCREAMING_SNAKE_CASE : Dict = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : Optional[Any] = type_vocab_size
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE : List[Any] = rotary_value
SCREAMING_SNAKE_CASE : List[str] = use_cache
class lowercase__ ( UpperCamelCase_):
@property
def __A ( self : Optional[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : str = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE : Dict = {0: '''batch''', 1: '''sequence'''}
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
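# Illustrative, standalone sketch (hypothetical names) of the ONNX input spec above:
# each input maps to its dynamic axes, axis 0 being the batch and axis 1 the sequence.
_demo_axes = {0: 'batch', 1: 'sequence'}
_demo_inputs = OrderedDict([('input_ids', _demo_axes), ('attention_mask', _demo_axes), ('token_type_ids', _demo_axes)])
assert list(_demo_inputs) == ['input_ids', 'attention_mask', 'token_type_ids']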
| 707
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
__UpperCamelCase : Dict = random.Random()
def A ( _lowercase , _lowercase=1.0 , _lowercase=None , _lowercase=None ):
if rng is None:
SCREAMING_SNAKE_CASE : Any = global_rng
SCREAMING_SNAKE_CASE : int = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class lowercase__ ( unittest.TestCase):
def __init__( self : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str=7 , UpperCamelCase__ : Any=400 , UpperCamelCase__ : List[str]=2000 , UpperCamelCase__ : List[Any]=2048 , UpperCamelCase__ : Any=128 , UpperCamelCase__ : Union[str, Any]=1 , UpperCamelCase__ : List[Any]=512 , UpperCamelCase__ : str=30 , UpperCamelCase__ : Tuple=4_4100 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = parent
SCREAMING_SNAKE_CASE : str = batch_size
SCREAMING_SNAKE_CASE : str = min_seq_length
SCREAMING_SNAKE_CASE : Dict = max_seq_length
SCREAMING_SNAKE_CASE : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE : Optional[Any] = spectrogram_length
SCREAMING_SNAKE_CASE : Optional[int] = feature_size
SCREAMING_SNAKE_CASE : Tuple = num_audio_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = hop_length
SCREAMING_SNAKE_CASE : List[Any] = chunk_length
SCREAMING_SNAKE_CASE : str = sampling_rate
def __A ( self : Optional[Any] ):
'''simple docstring'''
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def __A ( self : Tuple , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : Optional[int]=False ):
'''simple docstring'''
def _flatten(UpperCamelCase__ : str ):
return list(itertools.chain(*UpperCamelCase__ ) )
if equal_length:
SCREAMING_SNAKE_CASE : List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE : int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
            SCREAMING_SNAKE_CASE : Optional[Any] = [np.asarray(x ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = TvltFeatureExtractor
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = TvltFeatureExtractionTester(self )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(UpperCamelCase__ , '''spectrogram_length''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''feature_size''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''num_audio_channels''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''hop_length''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''chunk_length''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''sampling_rate''' ) )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract_first.save_pretrained(UpperCamelCase__ )[0]
check_json_file_has_correct_format(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = self.feature_extraction_class.from_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE : str = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE : Optional[int] = dict_first.pop('''mel_filters''' )
SCREAMING_SNAKE_CASE : Optional[int] = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Tuple = os.path.join(UpperCamelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = self.feature_extraction_class.from_json_file(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE : str = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE : int = dict_first.pop('''mel_filters''' )
SCREAMING_SNAKE_CASE : Any = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        SCREAMING_SNAKE_CASE : Optional[int] = [np.asarray(speech_input ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
SCREAMING_SNAKE_CASE : Optional[Any] = feature_extractor(UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
SCREAMING_SNAKE_CASE : List[str] = feature_extractor(
UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 , mask_audio=UpperCamelCase__ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE : Optional[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE : Dict = np.asarray(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = feature_extractor(UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def __A ( self : Optional[int] , UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE : Dict = ds.sort('''id''' ).select(range(UpperCamelCase__ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE : int = TvltFeatureExtractor()
SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(UpperCamelCase__ , return_tensors='''pt''' ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
SCREAMING_SNAKE_CASE : str = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , UpperCamelCase__ , atol=1E-4 ) )
| 34
| 0
|
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class lowercase__ :
UpperCamelCase_ = field(
metadata={"""help""": """The output directory where the model will be written."""} , )
UpperCamelCase_ = field(
metadata={
"""help""": (
"""The encoder model checkpoint for weights initialization."""
"""Don't set if you want to train an encoder model from scratch."""
)
} , )
UpperCamelCase_ = field(
metadata={
"""help""": (
"""The decoder model checkpoint for weights initialization."""
"""Don't set if you want to train a decoder model from scratch."""
)
} , )
UpperCamelCase_ = field(
default=UpperCamelCase_ , metadata={"""help""": """Pretrained encoder config name or path if not the same as encoder_model_name"""})
UpperCamelCase_ = field(
default=UpperCamelCase_ , metadata={"""help""": """Pretrained decoder config name or path if not the same as decoder_model_name"""})
def A ( ):
SCREAMING_SNAKE_CASE : Optional[Any] = HfArgumentParser((ModelArguments,) )
    (SCREAMING_SNAKE_CASE ,) = parser.parse_args_into_dataclasses()
# Load pretrained model and tokenizer
# Use explicit specified encoder config
if model_args.encoder_config_name:
SCREAMING_SNAKE_CASE : Optional[int] = AutoConfig.from_pretrained(model_args.encoder_config_name )
# Use pretrained encoder model's config
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
# Use explicit specified decoder config
if model_args.decoder_config_name:
SCREAMING_SNAKE_CASE : str = AutoConfig.from_pretrained(model_args.decoder_config_name )
# Use pretrained decoder model's config
else:
SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
# necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
SCREAMING_SNAKE_CASE : str = True
SCREAMING_SNAKE_CASE : List[Any] = True
SCREAMING_SNAKE_CASE : List[Any] = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=_lowercase , decoder_config=_lowercase , )
# GPT2 only has bos/eos tokens but not decoder_start/pad tokens
SCREAMING_SNAKE_CASE : Tuple = decoder_config.decoder_start_token_id
SCREAMING_SNAKE_CASE : str = decoder_config.pad_token_id
if decoder_start_token_id is None:
SCREAMING_SNAKE_CASE : Optional[Any] = decoder_config.bos_token_id
if pad_token_id is None:
SCREAMING_SNAKE_CASE : str = decoder_config.eos_token_id
# This is necessary to make Flax's generate() work
SCREAMING_SNAKE_CASE : List[str] = decoder_config.eos_token_id
SCREAMING_SNAKE_CASE : Optional[int] = decoder_start_token_id
SCREAMING_SNAKE_CASE : Tuple = pad_token_id
SCREAMING_SNAKE_CASE : int = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
SCREAMING_SNAKE_CASE : Tuple = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
model.save_pretrained(model_args.output_dir )
image_processor.save_pretrained(model_args.output_dir )
tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
| 708
|
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class lowercase__ ( UpperCamelCase_ , UpperCamelCase_):
UpperCamelCase_ = 1
@register_to_config
def __init__( self : List[str] , UpperCamelCase__ : int = 1000 , UpperCamelCase__ : Optional[Union[np.ndarray, List[float]]] = None ):
'''simple docstring'''
self.set_timesteps(UpperCamelCase__ )
# standard deviation of the initial noise distribution
SCREAMING_SNAKE_CASE : str = 1.0
        # For now we only support F-PNDM, i.e. the Runge-Kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
SCREAMING_SNAKE_CASE : Tuple = 4
# running values
SCREAMING_SNAKE_CASE : int = []
def __A ( self : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, torch.device] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = num_inference_steps
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
SCREAMING_SNAKE_CASE : Tuple = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
SCREAMING_SNAKE_CASE : int = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = torch.sin(steps * math.pi / 2 ) ** 2
SCREAMING_SNAKE_CASE : Dict = (1.0 - self.betas**2) ** 0.5
SCREAMING_SNAKE_CASE : Optional[Any] = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
SCREAMING_SNAKE_CASE : List[str] = timesteps.to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = []
def __A ( self : Tuple , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True , ):
'''simple docstring'''
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
SCREAMING_SNAKE_CASE : Optional[int] = (self.timesteps == timestep).nonzero().item()
SCREAMING_SNAKE_CASE : Union[str, Any] = timestep_index + 1
SCREAMING_SNAKE_CASE : int = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(UpperCamelCase__ )
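        # Linear multistep (Adams-Bashforth) combination of the cached outputs `ets`:
        # 1-step: e1; 2-step: (3*e1 - e2)/2; 3-step: (23*e1 - 16*e2 + 5*e3)/12;
        # 4-step: (55*e1 - 59*e2 + 37*e3 - 9*e4)/24, with e1 the most recent output.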
if len(self.ets ) == 1:
SCREAMING_SNAKE_CASE : Dict = self.ets[-1]
elif len(self.ets ) == 2:
SCREAMING_SNAKE_CASE : Optional[int] = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
SCREAMING_SNAKE_CASE : str = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
SCREAMING_SNAKE_CASE : Optional[Any] = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
SCREAMING_SNAKE_CASE : Optional[int] = self._get_prev_sample(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase__ )
def __A ( self : Optional[Any] , UpperCamelCase__ : torch.FloatTensor , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
return sample
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.alphas[timestep_index]
SCREAMING_SNAKE_CASE : List[str] = self.betas[timestep_index]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.alphas[prev_timestep_index]
SCREAMING_SNAKE_CASE : Tuple = self.betas[prev_timestep_index]
SCREAMING_SNAKE_CASE : Dict = (sample - sigma * ets) / max(UpperCamelCase__ , 1E-8 )
SCREAMING_SNAKE_CASE : Optional[Any] = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self : int ):
'''simple docstring'''
return self.config.num_train_timesteps
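# Illustrative, standalone check (hypothetical name) of the 4th-order Adams-Bashforth
# weights used in `step` above: with constant past outputs the combination must return
# that constant, since (55 - 59 + 37 - 9) / 24 == 1.
def _demo_ab4(ea, eb, ec, ed):
    return (55 * ea - 59 * eb + 37 * ec - 9 * ed) / 24
assert _demo_ab4(1.0, 1.0, 1.0, 1.0) == 1.0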
| 34
| 0
|
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
__UpperCamelCase : Optional[Any] = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
__UpperCamelCase : str = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}
__UpperCamelCase : List[str] = 'zero2'
__UpperCamelCase : Any = 'zero3'
__UpperCamelCase : Any = [ZEROa, ZEROa]
def A ( _lowercase , _lowercase , _lowercase ):
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
    SCREAMING_SNAKE_CASE : int = parameterized.to_safe_name('''_'''.join(str(x ) for x in param.args ) )
return f"""{func.__name__}_{param_based_name}"""
# Cartesian-product of zero stages with models to test
__UpperCamelCase : Optional[int] = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class lowercase__ ( UpperCamelCase_):
@parameterized.expand(UpperCamelCase__ , name_func=UpperCamelCase__ )
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int ):
'''simple docstring'''
self.run_and_check(
stage=UpperCamelCase__ , model=UpperCamelCase__ , distributed=UpperCamelCase__ , fpaa=UpperCamelCase__ , )
@require_torch_multi_gpu
@parameterized.expand(UpperCamelCase__ , name_func=UpperCamelCase__ )
def __A ( self : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int ):
'''simple docstring'''
self.run_and_check(
stage=UpperCamelCase__ , model=UpperCamelCase__ , distributed=UpperCamelCase__ , fpaa=UpperCamelCase__ , )
@parameterized.expand(UpperCamelCase__ , name_func=UpperCamelCase__ )
def __A ( self : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any ):
'''simple docstring'''
self.run_and_check(
stage=UpperCamelCase__ , model=UpperCamelCase__ , distributed=UpperCamelCase__ , fpaa=UpperCamelCase__ , )
@require_torch_multi_gpu
@parameterized.expand(UpperCamelCase__ , name_func=UpperCamelCase__ )
def __A ( self : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str ):
'''simple docstring'''
self.run_and_check(
stage=UpperCamelCase__ , model=UpperCamelCase__ , distributed=UpperCamelCase__ , fpaa=UpperCamelCase__ , )
def __A ( self : Optional[Any] , UpperCamelCase__ : Tuple ):
'''simple docstring'''
pass
def __A ( self : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : int = 10 , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = True , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = models[model]
SCREAMING_SNAKE_CASE : Optional[int] = self.run_trainer(
stage=UpperCamelCase__ , model_name=UpperCamelCase__ , eval_steps=UpperCamelCase__ , num_train_epochs=1 , distributed=UpperCamelCase__ , fpaa=UpperCamelCase__ , )
self.do_checks(UpperCamelCase__ )
return output_dir
def __A ( self : str , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : int = 10 , UpperCamelCase__ : int = 1 , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = True , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.get_auto_remove_tmp_dir('''./xxx''' , after=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = f"""
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(UpperCamelCase__ )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
""".split()
if fpaa:
args.extend(['''--fp16'''] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
SCREAMING_SNAKE_CASE : Optional[int] = f"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split()
SCREAMING_SNAKE_CASE : Optional[int] = [f"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""]
SCREAMING_SNAKE_CASE : List[str] = self.get_launcher(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(UpperCamelCase__ , env=self.get_env() )
return output_dir
def __A ( self : Any , UpperCamelCase__ : Any=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = min(2 , get_gpu_count() ) if distributed else 1
return f"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
| 709
|
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowercase__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = IFPipeline
UpperCamelCase_ = TEXT_TO_IMAGE_PARAMS - {"""width""", """height""", """latents"""}
UpperCamelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase_ = PipelineTesterMixin.required_optional_params - {"""latents"""}
def __A ( self : Tuple ):
'''simple docstring'''
return self._get_dummy_components()
def __A ( self : int , UpperCamelCase__ : Dict , UpperCamelCase__ : int=0 ):
'''simple docstring'''
if str(UpperCamelCase__ ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : Dict = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __A ( self : List[str] ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def __A ( self : Any ):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __A ( self : List[Any] ):
'''simple docstring'''
self._test_save_load_local()
def __A ( self : List[str] ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __A ( self : Tuple ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase):
def __A ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : str = IFSuperResolutionPipeline.from_pretrained(
'''DeepFloyd/IF-II-L-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
        # pre-compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('''cuda''' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = pipe_a.encode_prompt('''anime turtle''' , device='''cuda''' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : str = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
SCREAMING_SNAKE_CASE : Optional[int] = IFImgaImgPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE : Optional[int] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
SCREAMING_SNAKE_CASE : Tuple = IFInpaintingPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE : Optional[int] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict ):
'''simple docstring'''
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , num_inference_steps=2 , generator=UpperCamelCase__ , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Optional[int] = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
SCREAMING_SNAKE_CASE : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Tuple = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str ):
'''simple docstring'''
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , num_inference_steps=2 , generator=UpperCamelCase__ , output_type='''np''' , )
SCREAMING_SNAKE_CASE : int = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE : Optional[int] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , original_image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Optional[int] = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : int = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , mask_image=UpperCamelCase__ , num_inference_steps=2 , generator=UpperCamelCase__ , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Any = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : str = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , mask_image=UpperCamelCase__ , original_image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Optional[Any] = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
def A ( ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
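# Minimal usage sketch (assumption, not part of the test module): reset the CUDA
# peak-memory counters before a workload, then read torch.cuda.max_memory_allocated()
# afterwards; this is the measurement pattern each _test_if* method above follows.
if __name__ == "__main__" and torch.cuda.is_available():
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
    _x = torch.ones(1024, 1024, device="cuda")
    print(torch.cuda.max_memory_allocated())  # peak bytes allocated since the reset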
| 34
| 0
|
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def A ( _lowercase ) -> List[str]:
SCREAMING_SNAKE_CASE : Union[str, Any] = np.inf
def set_batch_size(_lowercase ) -> None:
nonlocal batch_size
if isinstance(_lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : int = min(_lowercase , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(_lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : Tuple = min(_lowercase , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(_lowercase , _lowercase ) and feature.dtype == "binary":
SCREAMING_SNAKE_CASE : Optional[Any] = min(_lowercase , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(_lowercase , _lowercase )
return None if batch_size is np.inf else batch_size
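# Minimal self-contained sketch (assumption, not datasets' actual API): the cap
# logic above restated without datasets internals. Image-like columns shrink the
# writer batch size; plain columns leave it unbounded (None).
if __name__ == "__main__":
    _IMAGE_CAP = 100  # illustrative stand-in for the row-group-size config constant
    def _writer_batch_size(column_kinds):
        cap = np.inf
        for kind in column_kinds:
            if kind == "image":
                cap = min(cap, _IMAGE_CAP)
        return None if cap is np.inf else cap
    print(_writer_batch_size(["image", "string"]))  # 100
    print(_writer_batch_size(["string"]))  # None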
class lowercase__ ( UpperCamelCase_):
def __init__( self : Tuple , UpperCamelCase__ : NestedDataStructureLike[PathLike] , UpperCamelCase__ : Optional[NamedSplit] = None , UpperCamelCase__ : Optional[Features] = None , UpperCamelCase__ : str = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[int] = None , **UpperCamelCase__ : List[str] , ):
'''simple docstring'''
super().__init__(
UpperCamelCase__ , split=UpperCamelCase__ , features=UpperCamelCase__ , cache_dir=UpperCamelCase__ , keep_in_memory=UpperCamelCase__ , streaming=UpperCamelCase__ , num_proc=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Optional[int] = path_or_paths if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else {self.split: path_or_paths}
SCREAMING_SNAKE_CASE : Union[str, Any] = _PACKAGED_DATASETS_MODULES['''parquet'''][1]
SCREAMING_SNAKE_CASE : Optional[Any] = Parquet(
cache_dir=UpperCamelCase__ , data_files=UpperCamelCase__ , features=UpperCamelCase__ , hash=UpperCamelCase__ , **UpperCamelCase__ , )
def __A ( self : str ):
'''simple docstring'''
if self.streaming:
SCREAMING_SNAKE_CASE : int = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : Any = None
SCREAMING_SNAKE_CASE : str = None
SCREAMING_SNAKE_CASE : Optional[Any] = None
self.builder.download_and_prepare(
download_config=UpperCamelCase__ , download_mode=UpperCamelCase__ , verification_mode=UpperCamelCase__ , base_path=UpperCamelCase__ , num_proc=self.num_proc , )
SCREAMING_SNAKE_CASE : str = self.builder.as_dataset(
split=self.split , verification_mode=UpperCamelCase__ , in_memory=self.keep_in_memory )
return dataset
class lowercase__ :
def __init__( self : List[str] , UpperCamelCase__ : Dataset , UpperCamelCase__ : Union[PathLike, BinaryIO] , UpperCamelCase__ : Optional[int] = None , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = dataset
SCREAMING_SNAKE_CASE : Any = path_or_buf
SCREAMING_SNAKE_CASE : Any = batch_size or get_writer_batch_size(dataset.features )
SCREAMING_SNAKE_CASE : Tuple = parquet_writer_kwargs
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , '''wb+''' ) as buffer:
SCREAMING_SNAKE_CASE : Optional[int] = self._write(file_obj=UpperCamelCase__ , batch_size=UpperCamelCase__ , **self.parquet_writer_kwargs )
else:
SCREAMING_SNAKE_CASE : List[Any] = self._write(file_obj=self.path_or_buf , batch_size=UpperCamelCase__ , **self.parquet_writer_kwargs )
return written
def __A ( self : Tuple , UpperCamelCase__ : BinaryIO , UpperCamelCase__ : int , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : str = parquet_writer_kwargs.pop('''path_or_buf''' , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = self.dataset.features.arrow_schema
SCREAMING_SNAKE_CASE : List[Any] = pq.ParquetWriter(UpperCamelCase__ , schema=UpperCamelCase__ , **UpperCamelCase__ )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , UpperCamelCase__ ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ):
SCREAMING_SNAKE_CASE : List[Any] = query_table(
table=self.dataset._data , key=slice(UpperCamelCase__ , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(UpperCamelCase__ )
written += batch.nbytes
writer.close()
return written
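# Minimal self-contained sketch (assumption): the batched Parquet write pattern
# that _write follows above, using a toy pyarrow table instead of a Dataset.
if __name__ == "__main__":
    import pyarrow as pa
    _table = pa.table({"x": list(range(10))})
    _writer = pq.ParquetWriter("/tmp/_demo.parquet", schema=_table.schema)
    for _offset in range(0, len(_table), 4):
        _writer.write_table(_table.slice(_offset, 4))
    _writer.close()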
| 710
|
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
__UpperCamelCase : int = logging.get_logger(__name__)
def A ( _lowercase , _lowercase , _lowercase , _lowercase ):
def constraint_to_multiple_of(_lowercase , _lowercase , _lowercase=0 , _lowercase=None ):
SCREAMING_SNAKE_CASE : int = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
SCREAMING_SNAKE_CASE : Dict = math.floor(val / multiple ) * multiple
if x < min_val:
SCREAMING_SNAKE_CASE : Optional[Any] = math.ceil(val / multiple ) * multiple
return x
SCREAMING_SNAKE_CASE : Optional[Any] = (output_size, output_size) if isinstance(_lowercase , _lowercase ) else output_size
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = get_image_size(_lowercase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = output_size
# determine new height and width
SCREAMING_SNAKE_CASE : Dict = output_height / input_height
SCREAMING_SNAKE_CASE : Optional[Any] = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
SCREAMING_SNAKE_CASE : List[Any] = scale_width
else:
# fit height
SCREAMING_SNAKE_CASE : List[Any] = scale_height
SCREAMING_SNAKE_CASE : List[str] = constraint_to_multiple_of(scale_height * input_height , multiple=_lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = constraint_to_multiple_of(scale_width * input_width , multiple=_lowercase )
return (new_height, new_width)
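# Minimal self-contained sketch (assumption, not part of the processor): the
# constrain-to-multiple sizing above with conventional names. A 480x640 input
# targeted at 384x384 with keep_aspect_ratio=True and multiple=32 keeps the
# height scale (0.8, the smaller deviation from 1) and snaps to (384, 512).
if __name__ == "__main__":
    def _constrain(val, multiple):
        return round(val / multiple) * multiple
    _in_h, _in_w, _out = 480, 640, 384
    _scale = min(_out / _in_h, _out / _in_w, key=lambda s: abs(1 - s))
    print(_constrain(_scale * _in_h, 32), _constrain(_scale * _in_w, 32))  # 384 512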
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = ["""pixel_values"""]
def __init__( self : int , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase__ : bool = False , UpperCamelCase__ : int = 1 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = size if size is not None else {'''height''': 384, '''width''': 384}
SCREAMING_SNAKE_CASE : Any = get_size_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = do_resize
SCREAMING_SNAKE_CASE : Any = size
SCREAMING_SNAKE_CASE : str = keep_aspect_ratio
SCREAMING_SNAKE_CASE : List[str] = ensure_multiple_of
SCREAMING_SNAKE_CASE : int = resample
SCREAMING_SNAKE_CASE : Any = do_rescale
SCREAMING_SNAKE_CASE : List[Any] = rescale_factor
SCREAMING_SNAKE_CASE : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __A ( self : Optional[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : bool = False , UpperCamelCase__ : int = 1 , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = get_size_dict(UpperCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE : Any = get_resize_output_image_size(
UpperCamelCase__ , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=UpperCamelCase__ , multiple=UpperCamelCase__ , )
return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Dict , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ):
'''simple docstring'''
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : List[str] , ):
'''simple docstring'''
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Optional[Any] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : float = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE : Optional[Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
SCREAMING_SNAKE_CASE : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
SCREAMING_SNAKE_CASE : Tuple = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE : str = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE : List[Any] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE : List[Any] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE : Dict = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE : Tuple = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE : Dict = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE : Any = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE : Any = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images]
SCREAMING_SNAKE_CASE : Optional[int] = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
SCREAMING_SNAKE_CASE : Tuple = {'''pixel_values''': images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
def __A ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Tuple] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = target_sizes.numpy()
SCREAMING_SNAKE_CASE : Optional[int] = []
for idx in range(len(UpperCamelCase__ ) ):
SCREAMING_SNAKE_CASE : List[str] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : List[Any] = logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE : List[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 34
| 0
|
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but costs more memory.
__UpperCamelCase : str = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
__UpperCamelCase : Tuple = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
__UpperCamelCase : Optional[int] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def A ( _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : int = len([g for position, g in enumerate(_lowercase ) if g == main_target[position]] )
return (item, float(_lowercase ))
def A ( _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : Optional[Any] = random.randint(0 , len(_lowercase ) - 1 )
SCREAMING_SNAKE_CASE : List[str] = parent_a[:random_slice] + parent_a[random_slice:]
SCREAMING_SNAKE_CASE : Optional[int] = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def A ( _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : Any = list(_lowercase )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
SCREAMING_SNAKE_CASE : List[Any] = random.choice(_lowercase )
return "".join(_lowercase )
def A ( _lowercase , _lowercase , _lowercase , ):
SCREAMING_SNAKE_CASE : List[str] = []
# Generate more children proportionally to the fitness score.
SCREAMING_SNAKE_CASE : Optional[int] = int(parent_a[1] * 100 ) + 1
SCREAMING_SNAKE_CASE : Tuple = 10 if child_n >= 10 else child_n
for _ in range(_lowercase ):
SCREAMING_SNAKE_CASE : Optional[int] = population_score[random.randint(0 , _lowercase )][0]
SCREAMING_SNAKE_CASE : Optional[Any] = crossover(parent_a[0] , _lowercase )
# Append new string to the population list.
pop.append(mutate(_lowercase , _lowercase ) )
pop.append(mutate(_lowercase , _lowercase ) )
return pop
def A ( _lowercase , _lowercase , _lowercase = True ):
    # Verify that N_POPULATION is bigger than N_SELECTED.
if N_POPULATION < N_SELECTED:
SCREAMING_SNAKE_CASE : List[Any] = f"""{N_POPULATION} must be bigger than {N_SELECTED}"""
raise ValueError(_lowercase )
    # Verify that the target contains no genes besides the ones in the genes variable.
SCREAMING_SNAKE_CASE : str = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
SCREAMING_SNAKE_CASE : List[Any] = f"""{not_in_genes_list} is not in genes list, evolution cannot converge"""
raise ValueError(_lowercase )
# Generate random starting population.
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for _ in range(_lowercase ):
population.append(''''''.join([random.choice(_lowercase ) for i in range(len(_lowercase ) )] ) )
    # Just some logs to know what the algorithm is doing.
SCREAMING_SNAKE_CASE : Dict = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(_lowercase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
SCREAMING_SNAKE_CASE : List[Any] = [evaluate(_lowercase , _lowercase ) for item in population]
# Check if there is a matching evolution.
SCREAMING_SNAKE_CASE : Any = sorted(_lowercase , key=lambda _lowercase : x[1] , reverse=_lowercase )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f"""\nGeneration: {generation}"""
f"""\nTotal Population:{total_population}"""
f"""\nBest score: {population_score[0][1]}"""
f"""\nBest string: {population_score[0][0]}""" )
# Flush the old population, keeping some of the best evolutions.
        # Keeping these avoids regression of the evolution.
SCREAMING_SNAKE_CASE : Union[str, Any] = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(_lowercase )
# Normalize population score to be between 0 and 1.
SCREAMING_SNAKE_CASE : List[Any] = [
(item, score / len(_lowercase )) for item, score in population_score
]
        # This is the selection step.
for i in range(_lowercase ):
population.extend(select(population_score[int(_lowercase )] , _lowercase , _lowercase ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
        # far fewer generations.
if len(_lowercase ) > N_POPULATION:
break
if __name__ == "__main__":
__UpperCamelCase : str = (
'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'
)
__UpperCamelCase : Optional[int] = list(
' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'
)
__UpperCamelCase : Optional[Any] = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
| 711
|
import random
def A ( _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = [], [], []
for element in data:
if element < pivot:
less.append(_lowercase )
elif element > pivot:
greater.append(_lowercase )
else:
equal.append(_lowercase )
return less, equal, greater
def A ( _lowercase , _lowercase ):
# index = len(items) // 2 when trying to find the median
# (value of index when items is sorted)
# invalid input
if index >= len(_lowercase ) or index < 0:
return None
SCREAMING_SNAKE_CASE : Dict = items[random.randint(0 , len(_lowercase ) - 1 )]
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = _partition(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE : List[Any] = len(_lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = len(_lowercase )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(_lowercase , _lowercase )
# must be in larger
else:
return quick_select(_lowercase , index - (m + count) )
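# Minimal self-contained restatement (assumption, not part of the module): the
# same randomized quickselect with conventional names, returning the value that
# would sit at `index` in the sorted list in expected O(n) time.
if __name__ == "__main__":
    def _quick_select(items, index):
        pivot = items[random.randint(0, len(items) - 1)]
        smaller = [x for x in items if x < pivot]
        count = len([x for x in items if x == pivot])
        if len(smaller) <= index < len(smaller) + count:
            return pivot
        if index < len(smaller):
            return _quick_select(smaller, index)
        return _quick_select([x for x in items if x > pivot], index - len(smaller) - count)
    _items = [7, 2, 9, 4, 1, 5, 8]
    print(_quick_select(_items, len(_items) // 2))  # 5 (the median)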
| 34
| 0
|
from __future__ import annotations
def A ( _lowercase ):
'''simple docstring'''
return len(set(_lowercase ) ) == len(_lowercase )
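# Minimal usage sketch (assumption): the set collapses duplicates, so equal
# lengths mean every element was unique.
if __name__ == "__main__":
    print(A([1, 2, 3]), A([1, 2, 2]))  # True False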
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Tuple = logging.get_logger(__name__)
# TODO Update this
__UpperCamelCase : List[str] = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """esm"""
def __init__( self : Tuple , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : int=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Any=768 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : str=12 , UpperCamelCase__ : Optional[int]=3072 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Union[str, Any]=1026 , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : Any=1E-12 , UpperCamelCase__ : Dict="absolute" , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : int=None , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Optional[Any]=None , **UpperCamelCase__ : Any , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__ , mask_token_id=UpperCamelCase__ , **UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : Any = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Dict = num_attention_heads
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps
SCREAMING_SNAKE_CASE : Dict = position_embedding_type
SCREAMING_SNAKE_CASE : Any = use_cache
SCREAMING_SNAKE_CASE : Dict = emb_layer_norm_before
SCREAMING_SNAKE_CASE : List[str] = token_dropout
SCREAMING_SNAKE_CASE : List[Any] = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('''No esmfold_config supplied for folding model, using default values.''' )
SCREAMING_SNAKE_CASE : List[Any] = EsmFoldConfig()
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = EsmFoldConfig(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = esmfold_config
if vocab_list is None:
logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
SCREAMING_SNAKE_CASE : Optional[int] = get_default_vocab_list()
else:
SCREAMING_SNAKE_CASE : Optional[Any] = vocab_list
else:
SCREAMING_SNAKE_CASE : str = None
SCREAMING_SNAKE_CASE : int = None
if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCamelCase__ ):
raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = super().to_dict()
if isinstance(self.esmfold_config , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Optional[Any] = self.esmfold_config.to_dict()
return output
@dataclass
class lowercase__ :
UpperCamelCase_ = None
UpperCamelCase_ = True
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = 0
UpperCamelCase_ = True
UpperCamelCase_ = False
UpperCamelCase_ = 128
UpperCamelCase_ = None
def __A ( self : Optional[int] ):
'''simple docstring'''
if self.trunk is None:
SCREAMING_SNAKE_CASE : Optional[Any] = TrunkConfig()
elif isinstance(self.trunk , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Tuple = TrunkConfig(**self.trunk )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = asdict(self )
SCREAMING_SNAKE_CASE : Tuple = self.trunk.to_dict()
return output
@dataclass
class lowercase__ :
UpperCamelCase_ = 48
UpperCamelCase_ = 1_024
UpperCamelCase_ = 128
UpperCamelCase_ = 32
UpperCamelCase_ = 32
UpperCamelCase_ = 32
UpperCamelCase_ = 0
UpperCamelCase_ = 0
UpperCamelCase_ = False
UpperCamelCase_ = 4
UpperCamelCase_ = 128
UpperCamelCase_ = None
def __A ( self : Any ):
'''simple docstring'''
if self.structure_module is None:
SCREAMING_SNAKE_CASE : Optional[int] = StructureModuleConfig()
elif isinstance(self.structure_module , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Optional[Any] = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""" )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
                f""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
                f""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )
SCREAMING_SNAKE_CASE : Dict = self.sequence_state_dim // self.sequence_head_width
SCREAMING_SNAKE_CASE : Tuple = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = asdict(self )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.structure_module.to_dict()
return output
@dataclass
class lowercase__ :
UpperCamelCase_ = 384
UpperCamelCase_ = 128
UpperCamelCase_ = 16
UpperCamelCase_ = 128
UpperCamelCase_ = 12
UpperCamelCase_ = 4
UpperCamelCase_ = 8
UpperCamelCase_ = 0.1
UpperCamelCase_ = 8
UpperCamelCase_ = 1
UpperCamelCase_ = 2
UpperCamelCase_ = 7
UpperCamelCase_ = 10
UpperCamelCase_ = 1E-8
UpperCamelCase_ = 1E5
def __A ( self : Dict ):
'''simple docstring'''
return asdict(self )
def A ( ):
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 34
| 0
|
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
__UpperCamelCase : Optional[Any] = ['text', 'image', 'audio']
def A ( _lowercase ) -> Dict:
SCREAMING_SNAKE_CASE : Dict = []
for input_type in input_types:
if input_type == "text":
inputs.append('''Text input''' )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3_000 ) )
elif isinstance(_lowercase , _lowercase ):
inputs.append(create_inputs(_lowercase ) )
else:
raise ValueError(f"""Invalid type requested: {input_type}""" )
return inputs
def A ( _lowercase ) -> List[str]:
SCREAMING_SNAKE_CASE : Dict = []
for output in outputs:
if isinstance(_lowercase , (str, AgentText) ):
output_types.append('''text''' )
elif isinstance(_lowercase , (Image.Image, AgentImage) ):
output_types.append('''image''' )
elif isinstance(_lowercase , (torch.Tensor, AgentAudio) ):
output_types.append('''audio''' )
else:
raise ValueError(f"""Invalid output: {output}""" )
return output_types
@is_tool_test
class lowercase__ :
def __A ( self : Union[str, Any] ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool , '''inputs''' ) )
self.assertTrue(hasattr(self.tool , '''outputs''' ) )
SCREAMING_SNAKE_CASE : List[str] = self.tool.inputs
for _input in inputs:
if isinstance(_input , UpperCamelCase__ ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
SCREAMING_SNAKE_CASE : Dict = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.tool(*UpperCamelCase__ )
# There is a single output
if len(self.tool.outputs ) == 1:
SCREAMING_SNAKE_CASE : Optional[int] = [outputs]
self.assertListEqual(output_types(UpperCamelCase__ ) , self.tool.outputs )
def __A ( self : Any ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool , '''description''' ) )
self.assertTrue(hasattr(self.tool , '''default_checkpoint''' ) )
self.assertTrue(self.tool.description.startswith('''This is a tool that''' ) )
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE : str = self.tool(*UpperCamelCase__ )
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [outputs]
self.assertEqual(len(UpperCamelCase__ ) , len(self.tool.outputs ) )
for output, output_type in zip(UpperCamelCase__ , self.tool.outputs ):
SCREAMING_SNAKE_CASE : Optional[int] = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(UpperCamelCase__ , UpperCamelCase__ ) )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE : Dict = []
for _input, input_type in zip(UpperCamelCase__ , self.tool.inputs ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
SCREAMING_SNAKE_CASE : List[Any] = self.tool(*UpperCamelCase__ )
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Tuple = [outputs]
self.assertEqual(len(UpperCamelCase__ ) , len(self.tool.outputs ) )
| 713
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""")) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""")
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
])
class lowercase__ ( unittest.TestCase):
def __A ( self : Any ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=UpperCamelCase__ , )
assert hasattr(self , '''env''' )
def __A ( self : str , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = f"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"""
# distributed data settings
SCREAMING_SNAKE_CASE : Any = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=UpperCamelCase__ , instance_count=UpperCamelCase__ , instance_type=self.instance_type , debugger_hook_config=UpperCamelCase__ , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=UpperCamelCase__ , py_version='''py36''' , )
def __A ( self : Optional[Any] , UpperCamelCase__ : List[str] ):
'''simple docstring'''
TrainingJobAnalytics(UpperCamelCase__ ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def __A ( self : Tuple , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.create_estimator(UpperCamelCase__ )
# run training
estimator.fit()
# result dataframe
SCREAMING_SNAKE_CASE : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
SCREAMING_SNAKE_CASE : List[str] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
SCREAMING_SNAKE_CASE : Dict = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
        # get train time from the SageMaker job; this includes starting, preprocessing, and stopping
SCREAMING_SNAKE_CASE : List[Any] = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , UpperCamelCase__ )
| 34
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
__UpperCamelCase : Dict = {
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """decision_transformer"""
UpperCamelCase_ = ["""past_key_values"""]
UpperCamelCase_ = {
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : str , UpperCamelCase__ : Optional[int]=17 , UpperCamelCase__ : int=4 , UpperCamelCase__ : Any=128 , UpperCamelCase__ : Dict=4096 , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : str=1 , UpperCamelCase__ : List[Any]=1024 , UpperCamelCase__ : Union[str, Any]=3 , UpperCamelCase__ : Optional[int]=1 , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Union[str, Any]="relu" , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : Optional[int]=1E-5 , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[str]=5_0256 , UpperCamelCase__ : int=5_0256 , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Any=False , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = state_dim
SCREAMING_SNAKE_CASE : Optional[int] = act_dim
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE : Any = max_ep_len
SCREAMING_SNAKE_CASE : List[str] = action_tanh
SCREAMING_SNAKE_CASE : str = vocab_size
SCREAMING_SNAKE_CASE : Tuple = n_positions
SCREAMING_SNAKE_CASE : List[Any] = n_layer
SCREAMING_SNAKE_CASE : str = n_head
SCREAMING_SNAKE_CASE : Union[str, Any] = n_inner
SCREAMING_SNAKE_CASE : List[str] = activation_function
SCREAMING_SNAKE_CASE : int = resid_pdrop
SCREAMING_SNAKE_CASE : Any = embd_pdrop
SCREAMING_SNAKE_CASE : str = attn_pdrop
SCREAMING_SNAKE_CASE : Tuple = layer_norm_epsilon
SCREAMING_SNAKE_CASE : int = initializer_range
SCREAMING_SNAKE_CASE : List[str] = scale_attn_weights
SCREAMING_SNAKE_CASE : List[Any] = use_cache
SCREAMING_SNAKE_CASE : List[Any] = scale_attn_by_inverse_layer_idx
SCREAMING_SNAKE_CASE : Optional[int] = reorder_and_upcast_attn
SCREAMING_SNAKE_CASE : Dict = bos_token_id
SCREAMING_SNAKE_CASE : Tuple = eos_token_id
super().__init__(bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
| 714
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__UpperCamelCase : Dict = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__UpperCamelCase : Tuple = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f"""{len(upper_files)} files contain uppercase characters:""")
print('\n'.join(upper_files) + '\n')
__UpperCamelCase : List[Any] = [file for file in filepaths if ' ' in file]
if space_files:
print(f"""{len(space_files)} files contain space characters:""")
print('\n'.join(space_files) + '\n')
__UpperCamelCase : List[Any] = [file for file in filepaths if '-' in file]
if hyphen_files:
print(f"""{len(hyphen_files)} files contain hyphen characters:""")
print('\n'.join(hyphen_files) + '\n')
__UpperCamelCase : List[Any] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f"""{len(nodir_files)} files are not in a directory:""")
print('\n'.join(nodir_files) + '\n')
__UpperCamelCase : Optional[Any] = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 34
| 0
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : int = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
__UpperCamelCase : Union[str, Any] = {
'tokenizer_file': {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
},
}
__UpperCamelCase : List[str] = {
'gpt-neox-20b': 2048,
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ["""input_ids""", """attention_mask"""]
def __init__( self : Dict , UpperCamelCase__ : Any=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : List[Any]="<|endoftext|>" , UpperCamelCase__ : Any="<|endoftext|>" , UpperCamelCase__ : Optional[Any]="<|endoftext|>" , UpperCamelCase__ : Union[str, Any]=False , **UpperCamelCase__ : Optional[Any] , ):
'''simple docstring'''
super().__init__(
UpperCamelCase__ , UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , unk_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , UpperCamelCase__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE : List[str] = getattr(UpperCamelCase__ , pre_tok_state.pop('''type''' ) )
SCREAMING_SNAKE_CASE : Dict = add_prefix_space
SCREAMING_SNAKE_CASE : List[str] = pre_tok_class(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = add_prefix_space
def __A ( self : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ )
return tuple(UpperCamelCase__ )
def __A ( self : Optional[Any] , UpperCamelCase__ : "Conversation" ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) + [self.eos_token_id] )
if len(UpperCamelCase__ ) > self.model_max_length:
SCREAMING_SNAKE_CASE : Dict = input_ids[-self.model_max_length :]
return input_ids
| 715
|
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
__UpperCamelCase : Dict = None
__UpperCamelCase : Tuple = logging.get_logger(__name__)
__UpperCamelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__UpperCamelCase : Optional[int] = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
__UpperCamelCase : Union[str, Any] = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ["""input_ids""", """attention_mask"""]
UpperCamelCase_ = TaTokenizer
UpperCamelCase_ = []
def __init__( self : str , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : str="<unk>" , UpperCamelCase__ : Optional[int]="<pad>" , UpperCamelCase__ : Optional[Any]=100 , UpperCamelCase__ : List[Any]=None , **UpperCamelCase__ : str , ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
SCREAMING_SNAKE_CASE : List[str] = [f"""<extra_id_{i}>""" for i in range(UpperCamelCase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
SCREAMING_SNAKE_CASE : int = len(set(filter(lambda UpperCamelCase__ : bool('''extra_id_''' in str(UpperCamelCase__ ) ) , UpperCamelCase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , extra_ids=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : str = vocab_file
SCREAMING_SNAKE_CASE : int = False if not self.vocab_file else True
SCREAMING_SNAKE_CASE : str = extra_ids
@staticmethod
def __A ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
SCREAMING_SNAKE_CASE : List[str] = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f""" {pretrained_model_name_or_path} automatically truncating your input to"""
f""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
f""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , UpperCamelCase__ , )
return max_model_length
def __A ( self : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE : Any = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ):
copyfile(self.vocab_file , UpperCamelCase__ )
logger.info(f"""Copy vocab file to {out_vocab_file}""" )
return (out_vocab_file,)
def __A ( self : Optional[Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
SCREAMING_SNAKE_CASE : Tuple = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def __A ( self : Any , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __A ( self : Dict ):
'''simple docstring'''
return list(
set(filter(lambda UpperCamelCase__ : bool(re.search(r'''<extra_id_\d+>''' , UpperCamelCase__ ) ) is not None , self.additional_special_tokens ) ) )
def __A ( self : List[Any] ):
'''simple docstring'''
return [self.convert_tokens_to_ids(UpperCamelCase__ ) for token in self.get_sentinel_tokens()]
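# Hedged sketch: the `extra_ids` handling above reduces to generating literal
# sentinel tokens "<extra_id_0>" ... "<extra_id_{n-1}>" and appending them to
# the special tokens. Standalone illustration; the helper name is hypothetical.
def _demo_make_sentinels(extra_ids):
    return [f"<extra_id_{i}>" for i in range(extra_ids)]
# _demo_make_sentinels(3) -> ['<extra_id_0>', '<extra_id_1>', '<extra_id_2>']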
| 34
| 0
|
from __future__ import annotations
from typing import Any
class lowercase__ :
def __init__( self : Any , UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = num_of_nodes
SCREAMING_SNAKE_CASE : list[list[int]] = []
SCREAMING_SNAKE_CASE : dict[int, int] = {}
def __A ( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int ):
'''simple docstring'''
self.m_edges.append([u_node, v_node, weight] )
def __A ( self : Tuple , UpperCamelCase__ : int ):
'''simple docstring'''
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def __A ( self : Tuple , UpperCamelCase__ : int ):
'''simple docstring'''
if self.m_component[u_node] != u_node:
for k in self.m_component:
SCREAMING_SNAKE_CASE : Optional[int] = self.find_component(UpperCamelCase__ )
def __A ( self : str , UpperCamelCase__ : list[int] , UpperCamelCase__ : int , UpperCamelCase__ : int ):
'''simple docstring'''
if component_size[u_node] <= component_size[v_node]:
SCREAMING_SNAKE_CASE : int = v_node
component_size[v_node] += component_size[u_node]
self.set_component(UpperCamelCase__ )
elif component_size[u_node] >= component_size[v_node]:
SCREAMING_SNAKE_CASE : Optional[int] = self.find_component(UpperCamelCase__ )
component_size[u_node] += component_size[v_node]
self.set_component(UpperCamelCase__ )
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
SCREAMING_SNAKE_CASE : List[str] = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
SCREAMING_SNAKE_CASE : int = edge
SCREAMING_SNAKE_CASE : Union[str, Any] = self.m_component[u]
SCREAMING_SNAKE_CASE : List[str] = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
SCREAMING_SNAKE_CASE : Union[str, Any] = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : str = edge
SCREAMING_SNAKE_CASE : List[str] = self.m_component[u]
SCREAMING_SNAKE_CASE : Optional[Any] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
num_of_components -= 1
SCREAMING_SNAKE_CASE : Union[str, Any] = [-1] * self.m_num_of_nodes
print(f"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def A ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
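# Hedged standalone sketch of the inner Boruvka step above: scan the edge list
# once and remember, per component, the cheapest edge leaving it. Names are
# hypothetical; `comp` maps node -> component representative.
def _demo_cheapest_edges(edges, comp):
    best = {}
    for u, v, w in edges:
        cu, cv = comp[u], comp[v]
        if cu == cv:
            continue  # edge stays inside one component; it cannot merge anything
        for c in (cu, cv):
            if c not in best or best[c][2] > w:
                best[c] = (u, v, w)
    return best
# _demo_cheapest_edges([(0, 1, 5), (1, 2, 1)], {0: 0, 1: 1, 2: 2})
# -> {0: (0, 1, 5), 1: (1, 2, 1), 2: (1, 2, 1)}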
| 716
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__UpperCamelCase : str = False
class lowercase__ ( unittest.TestCase):
pass
@nightly
@require_torch_gpu
class lowercase__ ( unittest.TestCase):
def __A ( self : Union[str, Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = pipe.dual_guided(
prompt='''first prompt''' , image=UpperCamelCase__ , text_to_image_strength=0.75 , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = VersatileDiffusionPipeline.from_pretrained(UpperCamelCase__ , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = generator.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = pipe.dual_guided(
prompt='''first prompt''' , image=UpperCamelCase__ , text_to_image_strength=0.75 , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = '''cyberpunk 2077'''
SCREAMING_SNAKE_CASE : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = pipe.dual_guided(
prompt=UpperCamelCase__ , image=UpperCamelCase__ , text_to_image_strength=0.75 , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
SCREAMING_SNAKE_CASE : Tuple = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
SCREAMING_SNAKE_CASE : Optional[Any] = '''A painting of a squirrel eating a burger '''
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = pipe.text_to_image(
prompt=UpperCamelCase__ , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE : List[str] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Any = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
SCREAMING_SNAKE_CASE : Optional[Any] = pipe.image_variation(UpperCamelCase__ , generator=UpperCamelCase__ , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE : Optional[int] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
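# Hedged standalone helper mirroring the determinism assertion above: two runs
# from the same seed, before and after a save_pretrained/from_pretrained round
# trip, should produce near-identical arrays (illustrative only; reuses the
# numpy import at the top of this file).
def _demo_outputs_match(a, b, tol=1E-5):
    # same criterion as the assert above: total absolute difference under tol
    return float(np.abs(np.asarray(a) - np.asarray(b)).sum()) < tol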
| 34
| 0
|
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('0.8.3'):
raise Exception('requires gluonnlp == 0.8.3')
if version.parse(mx.__version__) != version.parse('1.5.0'):
raise Exception('requires mxnet == 1.5.0')
logging.set_verbosity_info()
__UpperCamelCase : Dict = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = 'The Nymphenburg Palace is a beautiful palace in Munich!'
def A ( _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : int = {
'''attention_cell''': '''multi_head''',
'''num_layers''': 4,
'''units''': 1_024,
'''hidden_size''': 768,
'''max_length''': 512,
'''num_heads''': 8,
'''scaled''': True,
'''dropout''': 0.1,
'''use_residual''': True,
'''embed_size''': 1_024,
'''embed_dropout''': 0.1,
'''word_embed''': None,
'''layer_norm_eps''': 1e-5,
'''token_type_vocab_size''': 2,
}
SCREAMING_SNAKE_CASE : List[str] = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
SCREAMING_SNAKE_CASE : Optional[Any] = BERTEncoder(
attention_cell=predefined_args['''attention_cell'''] , num_layers=predefined_args['''num_layers'''] , units=predefined_args['''units'''] , hidden_size=predefined_args['''hidden_size'''] , max_length=predefined_args['''max_length'''] , num_heads=predefined_args['''num_heads'''] , scaled=predefined_args['''scaled'''] , dropout=predefined_args['''dropout'''] , output_attention=_lowercase , output_all_encodings=_lowercase , use_residual=predefined_args['''use_residual'''] , activation=predefined_args.get('''activation''' , '''gelu''' ) , layer_norm_eps=predefined_args.get('''layer_norm_eps''' , _lowercase ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
SCREAMING_SNAKE_CASE : int = '''openwebtext_ccnews_stories_books_cased'''
# Specify download folder to Gluonnlp's vocab
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(get_home_dir() , '''models''' )
SCREAMING_SNAKE_CASE : Optional[Any] = _load_vocab(_lowercase , _lowercase , _lowercase , cls=_lowercase )
SCREAMING_SNAKE_CASE : int = nlp.model.BERTModel(
_lowercase , len(_lowercase ) , units=predefined_args['''units'''] , embed_size=predefined_args['''embed_size'''] , embed_dropout=predefined_args['''embed_dropout'''] , word_embed=predefined_args['''word_embed'''] , use_pooler=_lowercase , use_token_type_embed=_lowercase , token_type_vocab_size=predefined_args['''token_type_vocab_size'''] , use_classifier=_lowercase , use_decoder=_lowercase , )
original_bort.load_parameters(_lowercase , cast_dtype=_lowercase , ignore_extra=_lowercase )
SCREAMING_SNAKE_CASE : int = original_bort._collect_params_with_prefix()
# Build our config 🤗
SCREAMING_SNAKE_CASE : Any = {
'''architectures''': ['''BertForMaskedLM'''],
'''attention_probs_dropout_prob''': predefined_args['''dropout'''],
'''hidden_act''': '''gelu''',
'''hidden_dropout_prob''': predefined_args['''dropout'''],
'''hidden_size''': predefined_args['''embed_size'''],
'''initializer_range''': 0.02,
'''intermediate_size''': predefined_args['''hidden_size'''],
'''layer_norm_eps''': predefined_args['''layer_norm_eps'''],
'''max_position_embeddings''': predefined_args['''max_length'''],
'''model_type''': '''bort''',
'''num_attention_heads''': predefined_args['''num_heads'''],
'''num_hidden_layers''': predefined_args['''num_layers'''],
'''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa
'''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa
'''vocab_size''': len(_lowercase ),
}
SCREAMING_SNAKE_CASE : List[str] = BertConfig.from_dict(_lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = BertForMaskedLM(_lowercase )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(_lowercase ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(_lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : Optional[int] = hf_param.shape
SCREAMING_SNAKE_CASE : Union[str, Any] = to_torch(params[gluon_param] )
SCREAMING_SNAKE_CASE : Tuple = gluon_param.shape
assert (
shape_hf == shape_gluon
), f"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"""
return gluon_param
SCREAMING_SNAKE_CASE : Optional[Any] = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , '''word_embed.0.weight''' )
SCREAMING_SNAKE_CASE : int = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , '''encoder.position_weight''' )
SCREAMING_SNAKE_CASE : Dict = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , '''encoder.layer_norm.beta''' )
SCREAMING_SNAKE_CASE : int = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , '''encoder.layer_norm.gamma''' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
SCREAMING_SNAKE_CASE : BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
SCREAMING_SNAKE_CASE : BertSelfAttention = layer.attention.self
SCREAMING_SNAKE_CASE : Optional[Any] = check_and_map_params(
self_attn.key.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" )
SCREAMING_SNAKE_CASE : int = check_and_map_params(
self_attn.key.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" )
SCREAMING_SNAKE_CASE : Optional[int] = check_and_map_params(
self_attn.query.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" )
SCREAMING_SNAKE_CASE : Tuple = check_and_map_params(
self_attn.query.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = check_and_map_params(
self_attn.value.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = check_and_map_params(
self_attn.value.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" )
# self attention output
SCREAMING_SNAKE_CASE : BertSelfOutput = layer.attention.output
SCREAMING_SNAKE_CASE : List[str] = check_and_map_params(
self_output.dense.bias , f"""encoder.transformer_cells.{i}.proj.bias""" )
SCREAMING_SNAKE_CASE : Any = check_and_map_params(
self_output.dense.weight , f"""encoder.transformer_cells.{i}.proj.weight""" )
SCREAMING_SNAKE_CASE : str = check_and_map_params(
self_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.layer_norm.beta""" )
SCREAMING_SNAKE_CASE : Optional[Any] = check_and_map_params(
self_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.layer_norm.gamma""" )
# intermediate
SCREAMING_SNAKE_CASE : BertIntermediate = layer.intermediate
SCREAMING_SNAKE_CASE : Tuple = check_and_map_params(
intermediate.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" )
SCREAMING_SNAKE_CASE : Optional[Any] = check_and_map_params(
intermediate.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" )
# output
SCREAMING_SNAKE_CASE : BertOutput = layer.output
SCREAMING_SNAKE_CASE : int = check_and_map_params(
bert_output.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" )
SCREAMING_SNAKE_CASE : Optional[Any] = check_and_map_params(
bert_output.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" )
SCREAMING_SNAKE_CASE : Any = check_and_map_params(
bert_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" )
SCREAMING_SNAKE_CASE : Optional[Any] = check_and_map_params(
bert_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
SCREAMING_SNAKE_CASE : int = RobertaTokenizer.from_pretrained('''roberta-base''' )
SCREAMING_SNAKE_CASE : Any = tokenizer.encode_plus(_lowercase )['''input_ids''']
# Get gluon output
SCREAMING_SNAKE_CASE : Tuple = mx.nd.array([input_ids] )
SCREAMING_SNAKE_CASE : List[Any] = original_bort(inputs=_lowercase , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(_lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = BertModel.from_pretrained(_lowercase )
hf_bort_model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.encode_plus(_lowercase , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : str = hf_bort_model(**_lowercase )[0]
SCREAMING_SNAKE_CASE : Dict = output_gluon[0].asnumpy()
SCREAMING_SNAKE_CASE : List[str] = output_hf[0].detach().numpy()
SCREAMING_SNAKE_CASE : str = np.max(np.abs(hf_layer - gluon_layer ) ).item()
SCREAMING_SNAKE_CASE : List[str] = np.allclose(_lowercase , _lowercase , atol=1e-3 )
    if success:
        print('''✔️ Both models output the same tensors''')
    else:
        print('''❌ Both models do **NOT** output the same tensors''')
print('''Absolute difference is:''' , _lowercase )
if __name__ == "__main__":
__UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bort_checkpoint_path', default=None, type=str, required=True, help='Path the official Bort params file.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__UpperCamelCase : Optional[Any] = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
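# Hedged standalone sketch of the shape-checked copy performed by
# check_and_map_params above: verify the shapes agree before wrapping the
# source array as a torch Parameter (reuses the np/torch/nn imports at the top
# of this script; the helper name is hypothetical).
def _demo_map_param(hf_param, src_array):
    assert tuple(hf_param.shape) == tuple(src_array.shape), "checkpoint shape mismatch"
    return nn.Parameter(torch.FloatTensor(np.asarray(src_array)))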
| 717
|
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def A ( _lowercase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = analyze_text(_lowercase )
SCREAMING_SNAKE_CASE : Any = list(''' ''' + ascii_lowercase )
    # total number of characters observed; the normalizer for the probabilities.
SCREAMING_SNAKE_CASE : Tuple = sum(single_char_strings.values() )
    # accumulator for the one-character (first-order) entropy
    SCREAMING_SNAKE_CASE : Tuple = 0
    # for each alphabet character present in the counts, add its entropy term
for ch in my_alphas:
if ch in single_char_strings:
SCREAMING_SNAKE_CASE : Tuple = single_char_strings[ch]
SCREAMING_SNAKE_CASE : List[str] = my_str / all_sum
my_fir_sum += prob * math.loga(_lowercase ) # entropy formula.
# print entropy
print(f"""{round(-1 * my_fir_sum ):.1f}""" )
    # two-character (second-order) statistics
SCREAMING_SNAKE_CASE : Optional[Any] = sum(two_char_strings.values() )
SCREAMING_SNAKE_CASE : List[str] = 0
# for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            SCREAMING_SNAKE_CASE : Union[str, Any] = cha + chb
if sequence in two_char_strings:
SCREAMING_SNAKE_CASE : Any = two_char_strings[sequence]
SCREAMING_SNAKE_CASE : Dict = int(_lowercase ) / all_sum
my_sec_sum += prob * math.loga(_lowercase )
# print second entropy
print(f"""{round(-1 * my_sec_sum ):.1f}""" )
# print the difference between them
print(f"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Tuple = Counter() # type: ignore
SCREAMING_SNAKE_CASE : Any = Counter() # type: ignore
single_char_strings[text[-1]] += 1
    # seed the pair counts with a leading space before the first character.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(_lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def A ( ):
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
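# Hedged standalone sketch of the first-order entropy printed above, written
# directly as -sum(p * log2(p)) with the math/Counter imports from the top of
# this file (illustrative only).
def _demo_char_entropy(text):
    counts = Counter(text)
    total = sum(counts.values())
    return -sum((n / total) * math.log2(n / total) for n in counts.values())
# _demo_char_entropy("aabb") == 1.0  (two equiprobable symbols -> 1 bit per character)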
| 34
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCamelCase : int = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : int = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
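# Hedged standalone sketch of the lazy-import pattern that _LazyModule
# implements above: attribute access triggers the submodule import on demand.
# This is a minimal illustration, not the transformers implementation.
import importlib
import types

class _DemoLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")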
| 718
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__UpperCamelCase : Tuple = {
'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'],
'tokenization_ctrl': ['CTRLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = [
'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'CTRLForSequenceClassification',
'CTRLLMHeadModel',
'CTRLModel',
'CTRLPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCTRLForSequenceClassification',
'TFCTRLLMHeadModel',
'TFCTRLModel',
'TFCTRLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 34
| 0
|
import random
class lowercase__ :
@staticmethod
def __A ( UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = [ord(UpperCamelCase__ ) for i in text]
SCREAMING_SNAKE_CASE : int = []
SCREAMING_SNAKE_CASE : List[str] = []
for i in plain:
SCREAMING_SNAKE_CASE : Tuple = random.randint(1 , 300 )
SCREAMING_SNAKE_CASE : Optional[Any] = (i + k) * k
cipher.append(UpperCamelCase__ )
key.append(UpperCamelCase__ )
return cipher, key
@staticmethod
def __A ( UpperCamelCase__ : list[int] , UpperCamelCase__ : list[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = []
for i in range(len(UpperCamelCase__ ) ):
SCREAMING_SNAKE_CASE : Dict = int((cipher[i] - (key[i]) ** 2) / key[i] )
plain.append(chr(UpperCamelCase__ ) )
return "".join(UpperCamelCase__ )
if __name__ == "__main__":
__UpperCamelCase : Union[str, Any] = Onepad().encrypt('Hello')
print(c, k)
print(Onepad().decrypt(c, k))
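# Worked example of the arithmetic above (values chosen for illustration):
# with key k = 3 and plaintext code point i = 72 ('H'), encryption gives
# (72 + 3) * 3 = 225 and decryption recovers (225 - 3 ** 2) / 3 = 72,
# so chr(72) == 'H' round-trips.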
| 719
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCamelCase : Tuple = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = ['MaskFormerFeatureExtractor']
__UpperCamelCase : List[Any] = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
__UpperCamelCase : Union[str, Any] = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 34
| 0
|
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCamelCase : List[str] = get_tests_dir('fixtures/test_sentencepiece.model')
__UpperCamelCase : Tuple = {'target_lang': 'fi', 'source_lang': 'en'}
__UpperCamelCase : Dict = '>>zh<<'
__UpperCamelCase : int = 'Helsinki-NLP/'
if is_torch_available():
__UpperCamelCase : List[str] = 'pt'
elif is_tf_available():
__UpperCamelCase : Union[str, Any] = 'tf'
else:
__UpperCamelCase : Union[str, Any] = 'jax'
@require_sentencepiece
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = MarianTokenizer
UpperCamelCase_ = False
UpperCamelCase_ = True
def __A ( self : Optional[int] ):
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE : Dict = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
SCREAMING_SNAKE_CASE : List[Any] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
SCREAMING_SNAKE_CASE : Optional[int] = Path(self.tmpdirname )
save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES['''vocab'''] )
save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES['''tokenizer_config_file'''] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES['''source_spm'''] )
copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES['''target_spm'''] )
SCREAMING_SNAKE_CASE : int = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __A ( self : Any , **UpperCamelCase__ : List[str] ):
'''simple docstring'''
return MarianTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self : Dict , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = '''</s>'''
SCREAMING_SNAKE_CASE : Any = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def __A ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''</s>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''<pad>''' )
self.assertEqual(len(UpperCamelCase__ ) , 9 )
def __A ( self : str ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = MarianTokenizer.from_pretrained(f"""{ORG_NAME}opus-mt-en-de""" )
SCREAMING_SNAKE_CASE : List[str] = en_de_tokenizer(['''I am a small frog'''] , return_tensors=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = [38, 121, 14, 697, 3_8848, 0]
self.assertListEqual(UpperCamelCase__ , batch.input_ids[0] )
SCREAMING_SNAKE_CASE : Dict = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = [x.name for x in Path(UpperCamelCase__ ).glob('''*''' )]
self.assertIn('''source.spm''' , UpperCamelCase__ )
MarianTokenizer.from_pretrained(UpperCamelCase__ )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Optional[Any] = tok(
['''I am a small frog''' * 1000, '''I am a small frog'''] , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(batch.input_ids.shape , (2, 512) )
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer()
SCREAMING_SNAKE_CASE : int = tok(['''I am a tiny frog''', '''I am a small frog'''] , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def __A ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = {'''input_ids''': [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name='''Helsinki-NLP/opus-mt-en-de''' , revision='''1a8c2263da11e68e50938f97e10cd57820bd504c''' , decode_kwargs={'''use_source_tokenizer''': True} , )
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = MarianTokenizer.from_pretrained('''hf-internal-testing/test-marian-two-vocabs''' )
SCREAMING_SNAKE_CASE : List[Any] = '''Tämä on testi'''
SCREAMING_SNAKE_CASE : Dict = '''This is a test'''
SCREAMING_SNAKE_CASE : List[Any] = [76, 7, 2047, 2]
SCREAMING_SNAKE_CASE : int = [69, 12, 11, 940, 2]
SCREAMING_SNAKE_CASE : List[Any] = tokenizer(UpperCamelCase__ ).input_ids
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = tokenizer(text_target=UpperCamelCase__ ).input_ids
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
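# Hedged sketch of the padding/truncation behaviour exercised above: mixing a
# very long and a short input with padding and truncation enabled yields a
# rectangular batch capped at model_max_length (illustrative call shape only):
#   batch = tok(["long " * 1000, "short"], padding=True, truncation=True, return_tensors="pt")
#   assert batch.input_ids.shape == (2, tok.model_max_length)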
| 720
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
__UpperCamelCase : Dict = [
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def A ( _lowercase , _lowercase=None , _lowercase=None , _lowercase=None ):
SCREAMING_SNAKE_CASE : Union[str, Any] = True
while ask_again:
SCREAMING_SNAKE_CASE : Optional[Any] = input(_lowercase )
try:
if default is not None and len(_lowercase ) == 0:
return default
return convert_value(_lowercase ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(_lowercase )
def A ( _lowercase , _lowercase=[] , _lowercase=None , _lowercase=0 ):
SCREAMING_SNAKE_CASE : Dict = BulletMenu(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE : str = menu.run(default_choice=_lowercase )
return convert_value(_lowercase ) if convert_value is not None else result
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Dict = int(_lowercase )
return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Any = int(_lowercase )
return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = int(_lowercase )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = int(_lowercase )
return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Dict = int(_lowercase )
return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] )
def A ( _lowercase ):
return {"yes": True, "no": False}[value.lower()]
class lowercase__ ( argparse.RawDescriptionHelpFormatter):
def __A ( self : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = super()._format_usage(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = usage.replace('''<command> [<args>] ''' , '''''' )
return usage
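# Hedged standalone sketch of the retry-until-valid prompt implemented by the
# first helper above (names hypothetical).
def _demo_ask(prompt, convert_value, default=None, error_message="Invalid value."):
    while True:
        result = input(prompt)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result)
        except Exception:
            print(error_message)
# e.g. _demo_ask("How many processes? ", int, default=1)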
| 34
| 0
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def A ( ):
SCREAMING_SNAKE_CASE : Union[str, Any] = ArgumentParser(
description=(
'''PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'''
) )
# Optional arguments for the launch helper
parser.add_argument('''--num_cores''' , type=_lowercase , default=1 , help='''Number of TPU cores to use (1 or 8).''' )
# positional
parser.add_argument(
'''training_script''' , type=_lowercase , help=(
'''The full path to the single TPU training '''
'''program/script to be launched in parallel, '''
'''followed by all the arguments for the '''
'''training script'''
) , )
# rest from the training program
parser.add_argument('''training_script_args''' , nargs=_lowercase )
return parser.parse_args()
def A ( ):
SCREAMING_SNAKE_CASE : Optional[int] = parse_args()
# Import training_script as a module.
SCREAMING_SNAKE_CASE : Optional[Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
SCREAMING_SNAKE_CASE : Dict = script_fpath.stem
SCREAMING_SNAKE_CASE : List[str] = importlib.import_module(_lowercase )
# Patch sys.argv
SCREAMING_SNAKE_CASE : Union[str, Any] = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
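# Hedged single-process sketch of the argv-patching trick used above: import
# the target script as a module, rewrite sys.argv, then call its entry point.
# No torch_xla here, and it assumes the script exposes a main() function.
def _demo_launch(script, extra_args):
    fpath = Path(script)
    sys.path.append(str(fpath.parent.resolve()))
    mod = importlib.import_module(fpath.stem)
    sys.argv = [script] + list(extra_args)
    mod.main()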
| 721
|
from __future__ import annotations
from typing import Any
class lowercase__ ( UpperCamelCase_):
pass
class lowercase__ :
def __init__( self : Union[str, Any] , UpperCamelCase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = data
SCREAMING_SNAKE_CASE : Node | None = None
def __iter__( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self
SCREAMING_SNAKE_CASE : Tuple = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(UpperCamelCase__ )
yield node.data
SCREAMING_SNAKE_CASE : Dict = node.next_node
@property
def __A ( self : Optional[int] ):
'''simple docstring'''
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
__UpperCamelCase : List[Any] = Node(1)
__UpperCamelCase : str = Node(2)
__UpperCamelCase : Dict = Node(3)
__UpperCamelCase : List[Any] = Node(4)
print(root_node.has_loop) # False
__UpperCamelCase : int = root_node.next_node
print(root_node.has_loop) # True
__UpperCamelCase : Union[str, Any] = Node(5)
__UpperCamelCase : Union[str, Any] = Node(6)
__UpperCamelCase : List[Any] = Node(5)
__UpperCamelCase : List[str] = Node(6)
print(root_node.has_loop) # False
__UpperCamelCase : List[Any] = Node(1)
print(root_node.has_loop) # False
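# Hedged note: the visited-list check above is O(n^2) time and O(n) space;
# Floyd's two-pointer cycle detection answers the same question in O(n) time
# and O(1) space. Standalone sketch against the same node shape (.next_node):
def _demo_has_loop(head):
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:
            return True
    return False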
| 34
| 0
|
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
__UpperCamelCase : int = logging.get_logger(__name__)
def A ( _lowercase , _lowercase , _lowercase , _lowercase ):
def constraint_to_multiple_of(_lowercase , _lowercase , _lowercase=0 , _lowercase=None ):
SCREAMING_SNAKE_CASE : int = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
SCREAMING_SNAKE_CASE : Dict = math.floor(val / multiple ) * multiple
if x < min_val:
SCREAMING_SNAKE_CASE : Optional[Any] = math.ceil(val / multiple ) * multiple
return x
SCREAMING_SNAKE_CASE : Optional[Any] = (output_size, output_size) if isinstance(_lowercase , _lowercase ) else output_size
SCREAMING_SNAKE_CASE : Optional[int] = get_image_size(_lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = output_size
# determine new height and width
SCREAMING_SNAKE_CASE : Dict = output_height / input_height
SCREAMING_SNAKE_CASE : Optional[Any] = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
SCREAMING_SNAKE_CASE : List[Any] = scale_width
else:
# fit height
SCREAMING_SNAKE_CASE : List[Any] = scale_height
SCREAMING_SNAKE_CASE : List[str] = constraint_to_multiple_of(scale_height * input_height , multiple=_lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = constraint_to_multiple_of(scale_width * input_width , multiple=_lowercase )
return (new_height, new_width)
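# Worked example of the helper above (hedged; follows the code as written):
# for a 480x640 input and a 384x384 target with keep_aspect_ratio=True and
# multiple=32, the height scale 384/480 = 0.8 is closer to 1 than the width
# scale 384/640 = 0.6, so 0.8 is applied to both sides, giving
# constraint_to_multiple_of(384) = 384 and constraint_to_multiple_of(512) = 512,
# i.e. an output of (384, 512).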
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = ["""pixel_values"""]
def __init__( self : int , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase__ : bool = False , UpperCamelCase__ : int = 1 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = size if size is not None else {'''height''': 384, '''width''': 384}
SCREAMING_SNAKE_CASE : Any = get_size_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = do_resize
SCREAMING_SNAKE_CASE : Any = size
SCREAMING_SNAKE_CASE : str = keep_aspect_ratio
SCREAMING_SNAKE_CASE : List[str] = ensure_multiple_of
SCREAMING_SNAKE_CASE : int = resample
SCREAMING_SNAKE_CASE : Any = do_rescale
SCREAMING_SNAKE_CASE : List[Any] = rescale_factor
SCREAMING_SNAKE_CASE : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __A ( self : Optional[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : bool = False , UpperCamelCase__ : int = 1 , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = get_size_dict(UpperCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE : Any = get_resize_output_image_size(
UpperCamelCase__ , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=UpperCamelCase__ , multiple=UpperCamelCase__ , )
return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Dict , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ):
'''simple docstring'''
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : List[str] , ):
'''simple docstring'''
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Optional[Any] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : float = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE : Optional[Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
SCREAMING_SNAKE_CASE : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
SCREAMING_SNAKE_CASE : Tuple = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE : str = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE : List[Any] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE : List[Any] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE : Dict = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE : Tuple = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE : Dict = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE : Any = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE : Any = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images]
SCREAMING_SNAKE_CASE : Optional[int] = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
SCREAMING_SNAKE_CASE : Tuple = {'''pixel_values''': images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
def __A ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Tuple] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = target_sizes.numpy()
SCREAMING_SNAKE_CASE : Optional[int] = []
for idx in range(len(UpperCamelCase__ ) ):
SCREAMING_SNAKE_CASE : List[str] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : List[Any] = logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE : List[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
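# Hedged standalone sketch of the semantic post-processing above: upsample the
# per-image logits to the requested size, then argmax over the class dimension
# (relies on the guarded torch import at the top of this file; shapes are
# illustrative).
def _demo_postprocess(logits, target_size):
    resized = torch.nn.functional.interpolate(
        logits.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False
    )
    return resized[0].argmax(dim=0)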
| 700
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = ["""input_features""", """is_longer"""]
def __init__( self : Optional[Any] , UpperCamelCase__ : Dict=64 , UpperCamelCase__ : Optional[Any]=4_8000 , UpperCamelCase__ : Tuple=480 , UpperCamelCase__ : Union[str, Any]=10 , UpperCamelCase__ : List[Any]=1024 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : int=False , UpperCamelCase__ : float = 0 , UpperCamelCase__ : float = 1_4000 , UpperCamelCase__ : int = None , UpperCamelCase__ : str = "fusion" , UpperCamelCase__ : str = "repeatpad" , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
feature_size=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , padding_value=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = top_db
SCREAMING_SNAKE_CASE : Union[str, Any] = truncation
SCREAMING_SNAKE_CASE : str = padding
SCREAMING_SNAKE_CASE : List[Any] = fft_window_size
SCREAMING_SNAKE_CASE : Tuple = (fft_window_size >> 1) + 1
SCREAMING_SNAKE_CASE : List[str] = hop_length
SCREAMING_SNAKE_CASE : List[Any] = max_length_s
SCREAMING_SNAKE_CASE : Tuple = max_length_s * sampling_rate
SCREAMING_SNAKE_CASE : List[Any] = sampling_rate
SCREAMING_SNAKE_CASE : List[str] = frequency_min
SCREAMING_SNAKE_CASE : Any = frequency_max
SCREAMING_SNAKE_CASE : List[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase__ , min_frequency=UpperCamelCase__ , max_frequency=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , norm=UpperCamelCase__ , mel_scale='''htk''' , )
SCREAMING_SNAKE_CASE : Optional[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase__ , min_frequency=UpperCamelCase__ , max_frequency=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , norm='''slaney''' , mel_scale='''slaney''' , )
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __A ( self : Optional[int] , UpperCamelCase__ : np.array , UpperCamelCase__ : Optional[np.array] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = spectrogram(
UpperCamelCase__ , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase__ , log_mel='''dB''' , )
return log_mel_spectrogram.T
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE : int = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE : Any = [0]
# randomly choose index for each part
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.choice(ranges[0] )
SCREAMING_SNAKE_CASE : List[Any] = np.random.choice(ranges[1] )
SCREAMING_SNAKE_CASE : int = np.random.choice(ranges[2] )
SCREAMING_SNAKE_CASE : Optional[int] = mel[idx_front : idx_front + chunk_frames, :]
SCREAMING_SNAKE_CASE : Optional[Any] = mel[idx_middle : idx_middle + chunk_frames, :]
SCREAMING_SNAKE_CASE : Tuple = mel[idx_back : idx_back + chunk_frames, :]
SCREAMING_SNAKE_CASE : str = torch.tensor(mel[None, None, :] )
SCREAMING_SNAKE_CASE : Optional[int] = torch.nn.functional.interpolate(
UpperCamelCase__ , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = mel_shrink[0][0].numpy()
SCREAMING_SNAKE_CASE : Union[str, Any] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
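    # Shape sketch for the fusion above (illustrative, assuming 64 mel bins): given
    # `mel` of shape (total_frames, 64) and `chunk_frames` frames per chunk, the
    # result stacks [shrunk-global, front, middle, back] into a (4, chunk_frames, 64)
    # array, with the global view obtained by bilinear interpolation of the full mel.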
def __A ( self : Dict , UpperCamelCase__ : np.array , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
SCREAMING_SNAKE_CASE : Optional[Any] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ ) - max_length
SCREAMING_SNAKE_CASE : Dict = np.random.randint(0 , overflow + 1 )
SCREAMING_SNAKE_CASE : Optional[Any] = waveform[idx : idx + max_length]
SCREAMING_SNAKE_CASE : Any = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
SCREAMING_SNAKE_CASE : Any = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters )
                SCREAMING_SNAKE_CASE : List[str] = max_length // self.hop_length + 1  # the +1 is related to how the spectrogram is computed
SCREAMING_SNAKE_CASE : List[Any] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
SCREAMING_SNAKE_CASE : Optional[Any] = np.stack([mel, mel, mel, mel] , axis=0 )
SCREAMING_SNAKE_CASE : Tuple = False
else:
SCREAMING_SNAKE_CASE : str = self._random_mel_fusion(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = True
else:
raise NotImplementedError(f"""data_truncating {truncation} not implemented""" )
else:
SCREAMING_SNAKE_CASE : List[str] = False
        # `repeat` is only used as an additional padding mode: the audio is repeated before the usual max_length padding is applied
if waveform.shape[0] < max_length:
if padding == "repeat":
SCREAMING_SNAKE_CASE : Tuple = int(max_length / len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Any = np.stack(np.tile(UpperCamelCase__ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
SCREAMING_SNAKE_CASE : List[Any] = int(max_length / len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Dict = np.stack(np.tile(UpperCamelCase__ , UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Dict = np.pad(UpperCamelCase__ , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
if truncation == "fusion":
SCREAMING_SNAKE_CASE : List[Any] = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
SCREAMING_SNAKE_CASE : List[str] = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any] , UpperCamelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase__ : str = None , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , **UpperCamelCase__ : Any , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = truncation if truncation is not None else self.truncation
SCREAMING_SNAKE_CASE : List[str] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
SCREAMING_SNAKE_CASE : List[str] = isinstance(UpperCamelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
SCREAMING_SNAKE_CASE : int = is_batched_numpy or (
isinstance(UpperCamelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE : Any = [np.asarray(UpperCamelCase__ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCamelCase__ , np.ndarray ):
SCREAMING_SNAKE_CASE : List[Any] = np.asarray(UpperCamelCase__ , dtype=np.floataa )
elif isinstance(UpperCamelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE : Union[str, Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE : List[str] = [np.asarray(UpperCamelCase__ )]
# convert to mel spectrogram, truncate and pad if needed.
SCREAMING_SNAKE_CASE : int = [
self._get_input_mel(UpperCamelCase__ , max_length if max_length else self.nb_max_samples , UpperCamelCase__ , UpperCamelCase__ )
for waveform in raw_speech
]
SCREAMING_SNAKE_CASE : str = []
SCREAMING_SNAKE_CASE : List[str] = []
for mel, longer in padded_inputs:
input_mel.append(UpperCamelCase__ )
is_longer.append(UpperCamelCase__ )
if truncation == "fusion" and sum(UpperCamelCase__ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randint(0 , len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Optional[Any] = True
if isinstance(input_mel[0] , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(UpperCamelCase__ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
SCREAMING_SNAKE_CASE : Optional[Any] = [[longer] for longer in is_longer]
SCREAMING_SNAKE_CASE : Union[str, Any] = {'''input_features''': input_mel, '''is_longer''': is_longer}
SCREAMING_SNAKE_CASE : int = BatchFeature(UpperCamelCase__ )
if return_tensors is not None:
SCREAMING_SNAKE_CASE : int = input_features.convert_to_tensors(UpperCamelCase__ )
return input_features
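    # A minimal usage sketch (hedged: the class above appears to be CLAP's feature
    # extractor; the instance name, file name and tensor shapes below are illustrative):
    #
    #   import soundfile as sf
    #   audio, sr = sf.read("sample.wav")                      # mono waveform
    #   feats = feature_extractor(audio, sampling_rate=sr, return_tensors="pt")
    #   # feats["input_features"]: (batch, 4, frames, 64) with truncation="fusion"
    #   # feats["is_longer"]: per-example flags consumed by the feature-fusion branch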
| 34
| 0
|
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def A ( _lowercase , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : str = AutoConfig.from_pretrained(_lowercase )
SCREAMING_SNAKE_CASE : str = FlaxAutoModelForSeqaSeqLM.from_config(config=_lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = checkpoints.load_tax_checkpoint(_lowercase )
SCREAMING_SNAKE_CASE : Tuple = '''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp''']
if config.model_type == "t5":
SCREAMING_SNAKE_CASE : int = '''SelfAttention'''
if config.model_type == "longt5" and config.encoder_attention_type == "local":
SCREAMING_SNAKE_CASE : str = '''LocalSelfAttention'''
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
SCREAMING_SNAKE_CASE : List[str] = '''TransientGlobalSelfAttention'''
else:
raise ValueError(
            '''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5\'` with `encoder_attention_type`'''
            ''' attribute with a value from [\'local\', \'transient-global\'].''' )
# Encoder
for layer_index in range(config.num_layers ):
SCREAMING_SNAKE_CASE : List[str] = f"""layers_{str(_lowercase )}"""
# Self-Attention
SCREAMING_SNAKE_CASE : List[Any] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
SCREAMING_SNAKE_CASE : str = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
SCREAMING_SNAKE_CASE : List[Any] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
SCREAMING_SNAKE_CASE : List[str] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
SCREAMING_SNAKE_CASE : List[str] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']
# Layer Normalization
SCREAMING_SNAKE_CASE : List[str] = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']
if split_mlp_wi:
SCREAMING_SNAKE_CASE : int = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
SCREAMING_SNAKE_CASE : Optional[int] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
SCREAMING_SNAKE_CASE : int = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
SCREAMING_SNAKE_CASE : Tuple = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
SCREAMING_SNAKE_CASE : Any = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
SCREAMING_SNAKE_CASE : List[str] = flax_model.params['''encoder''']['''block'''][str(_lowercase )]['''layer''']
SCREAMING_SNAKE_CASE : Dict = tax_attention_key
SCREAMING_SNAKE_CASE : List[Any] = tax_attention_out
SCREAMING_SNAKE_CASE : List[str] = tax_attention_query
SCREAMING_SNAKE_CASE : List[str] = tax_attention_value
SCREAMING_SNAKE_CASE : Any = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
SCREAMING_SNAKE_CASE : Dict = tax_global_layer_norm
if split_mlp_wi:
            SCREAMING_SNAKE_CASE : List[str] = tax_mlp_wi_0
            SCREAMING_SNAKE_CASE : Optional[Any] = tax_mlp_wi_1
else:
SCREAMING_SNAKE_CASE : Optional[Any] = tax_mlp_wi
SCREAMING_SNAKE_CASE : Tuple = tax_mlp_wo
SCREAMING_SNAKE_CASE : Optional[Any] = tax_mlp_layer_norm
SCREAMING_SNAKE_CASE : str = flax_model_encoder_layer_block
# Only for layer 0:
SCREAMING_SNAKE_CASE : List[Any] = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T
SCREAMING_SNAKE_CASE : str = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
SCREAMING_SNAKE_CASE : int = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T
SCREAMING_SNAKE_CASE : int = tax_encoder_global_rel_embedding
# Assigning
SCREAMING_SNAKE_CASE : Optional[int] = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale''']
SCREAMING_SNAKE_CASE : int = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
SCREAMING_SNAKE_CASE : List[Any] = f"""layers_{str(_lowercase )}"""
# Self-Attention
SCREAMING_SNAKE_CASE : List[str] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
SCREAMING_SNAKE_CASE : Any = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
SCREAMING_SNAKE_CASE : List[Any] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
SCREAMING_SNAKE_CASE : Dict = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']
# Layer Normalization
SCREAMING_SNAKE_CASE : Dict = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][
'''scale'''
]
# Encoder-Decoder-Attention
SCREAMING_SNAKE_CASE : str = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
SCREAMING_SNAKE_CASE : str = tax_enc_dec_attention_module['''key''']['''kernel''']
SCREAMING_SNAKE_CASE : List[Any] = tax_enc_dec_attention_module['''out''']['''kernel''']
SCREAMING_SNAKE_CASE : int = tax_enc_dec_attention_module['''query''']['''kernel''']
SCREAMING_SNAKE_CASE : List[str] = tax_enc_dec_attention_module['''value''']['''kernel''']
# Layer Normalization
SCREAMING_SNAKE_CASE : Tuple = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']
# MLP
if split_mlp_wi:
SCREAMING_SNAKE_CASE : Any = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
SCREAMING_SNAKE_CASE : str = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
SCREAMING_SNAKE_CASE : List[str] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
SCREAMING_SNAKE_CASE : Dict = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
SCREAMING_SNAKE_CASE : List[Any] = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
SCREAMING_SNAKE_CASE : Any = flax_model.params['''decoder''']['''block'''][str(_lowercase )]['''layer''']
SCREAMING_SNAKE_CASE : Optional[Any] = tax_attention_key
SCREAMING_SNAKE_CASE : Union[str, Any] = tax_attention_out
SCREAMING_SNAKE_CASE : Dict = tax_attention_query
SCREAMING_SNAKE_CASE : Any = tax_attention_value
SCREAMING_SNAKE_CASE : Optional[int] = tax_pre_attention_layer_norm
SCREAMING_SNAKE_CASE : Union[str, Any] = tax_enc_dec_attention_key
SCREAMING_SNAKE_CASE : List[Any] = tax_enc_dec_attention_out
SCREAMING_SNAKE_CASE : List[Any] = tax_enc_dec_attention_query
SCREAMING_SNAKE_CASE : List[Any] = tax_enc_dec_attention_value
SCREAMING_SNAKE_CASE : Optional[Any] = tax_cross_layer_norm
if split_mlp_wi:
            SCREAMING_SNAKE_CASE : Any = tax_mlp_wi_0
            SCREAMING_SNAKE_CASE : Dict = tax_mlp_wi_1
else:
SCREAMING_SNAKE_CASE : Optional[int] = tax_mlp_wi
SCREAMING_SNAKE_CASE : int = tax_mlp_wo
            SCREAMING_SNAKE_CASE : Optional[int] = tax_mlp_layer_norm
SCREAMING_SNAKE_CASE : Union[str, Any] = flax_model_decoder_layer_block
# Decoder Normalization
SCREAMING_SNAKE_CASE : Optional[Any] = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale''']
    SCREAMING_SNAKE_CASE : Optional[int] = tax_decoder_norm
# Only for layer 0:
SCREAMING_SNAKE_CASE : Tuple = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T
SCREAMING_SNAKE_CASE : Any = tax_decoder_rel_embedding
# Token Embeddings
SCREAMING_SNAKE_CASE : Any = tax_model['''target''']['''token_embedder''']['''embedding''']
    SCREAMING_SNAKE_CASE : List[str] = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
SCREAMING_SNAKE_CASE : int = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']
flax_model.save_pretrained(_lowercase )
    print('''T5X Model was successfully converted!''' )
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
__UpperCamelCase : List[str] = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
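# Example invocation (hedged: the script name and paths are illustrative; the flags
# come from the argparse definitions above):
#
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --config_name google/long-t5-local-base \
#       --flax_dump_folder_path ./flax_dump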
| 701
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : str = {
'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json',
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """layoutlmv3"""
def __init__( self : Optional[int] , UpperCamelCase__ : Union[str, Any]=5_0265 , UpperCamelCase__ : List[Any]=768 , UpperCamelCase__ : Union[str, Any]=12 , UpperCamelCase__ : Union[str, Any]=12 , UpperCamelCase__ : Tuple=3072 , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Any=512 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : List[Any]=0.02 , UpperCamelCase__ : Optional[Any]=1E-5 , UpperCamelCase__ : List[str]=1 , UpperCamelCase__ : int=0 , UpperCamelCase__ : str=2 , UpperCamelCase__ : List[str]=1024 , UpperCamelCase__ : str=128 , UpperCamelCase__ : str=128 , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Optional[int]=32 , UpperCamelCase__ : Any=128 , UpperCamelCase__ : Optional[Any]=64 , UpperCamelCase__ : Dict=256 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Dict=224 , UpperCamelCase__ : Optional[Any]=3 , UpperCamelCase__ : Any=16 , UpperCamelCase__ : Any=None , **UpperCamelCase__ : Optional[Any] , ):
'''simple docstring'''
super().__init__(
vocab_size=UpperCamelCase__ , hidden_size=UpperCamelCase__ , num_hidden_layers=UpperCamelCase__ , num_attention_heads=UpperCamelCase__ , intermediate_size=UpperCamelCase__ , hidden_act=UpperCamelCase__ , hidden_dropout_prob=UpperCamelCase__ , attention_probs_dropout_prob=UpperCamelCase__ , max_position_embeddings=UpperCamelCase__ , type_vocab_size=UpperCamelCase__ , initializer_range=UpperCamelCase__ , layer_norm_eps=UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = max_ad_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = coordinate_size
SCREAMING_SNAKE_CASE : List[str] = shape_size
SCREAMING_SNAKE_CASE : Optional[int] = has_relative_attention_bias
SCREAMING_SNAKE_CASE : List[Any] = rel_pos_bins
SCREAMING_SNAKE_CASE : str = max_rel_pos
SCREAMING_SNAKE_CASE : Any = has_spatial_attention_bias
SCREAMING_SNAKE_CASE : Union[str, Any] = rel_ad_pos_bins
SCREAMING_SNAKE_CASE : Union[str, Any] = max_rel_ad_pos
SCREAMING_SNAKE_CASE : Union[str, Any] = text_embed
SCREAMING_SNAKE_CASE : List[str] = visual_embed
SCREAMING_SNAKE_CASE : Optional[Any] = input_size
SCREAMING_SNAKE_CASE : Optional[Any] = num_channels
SCREAMING_SNAKE_CASE : List[Any] = patch_size
SCREAMING_SNAKE_CASE : List[Any] = classifier_dropout
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = version.parse("""1.12""")
@property
def __A ( self : str ):
'''simple docstring'''
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
def __A ( self : int ):
'''simple docstring'''
return 1E-5
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return 12
def __A ( self : Optional[Any] , UpperCamelCase__ : "ProcessorMixin" , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional["TensorType"] = None , UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : int = 40 , ):
'''simple docstring'''
setattr(processor.image_processor , '''apply_ocr''' , UpperCamelCase__ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : Any = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : List[Any] = processor.tokenizer.num_special_tokens_to_add(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE : Union[str, Any] = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
SCREAMING_SNAKE_CASE : Any = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_images(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = dict(
processor(
UpperCamelCase__ , text=UpperCamelCase__ , boxes=UpperCamelCase__ , return_tensors=UpperCamelCase__ , ) )
return inputs
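    # A minimal export sketch (hedged: this assumes the `transformers.onnx` CLI entry
    # point that consumes OnnxConfig subclasses like the one above; the output
    # directory is illustrative):
    #
    #   python -m transformers.onnx --model=microsoft/layoutlmv3-base onnx/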
| 34
| 0
|
from ..utils import DummyObject, requires_backends
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : int , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Any ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : Tuple , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : str , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : List[str] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : Any , *UpperCamelCase__ : Any , **UpperCamelCase__ : Any ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : int , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : int , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : str ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : List[str] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : int ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : Tuple , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : Dict , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : str , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Dict ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : str , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : Dict , *UpperCamelCase__ : int , **UpperCamelCase__ : str ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : Any , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : int , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : Dict , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : str ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : int , *UpperCamelCase__ : Any , **UpperCamelCase__ : Tuple ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *UpperCamelCase__ : Dict , **UpperCamelCase__ : str ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : List[str] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : Tuple , *UpperCamelCase__ : Any , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : List[str] , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : List[Any] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Tuple ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : List[Any] , *UpperCamelCase__ : str , **UpperCamelCase__ : List[str] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : Any , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : Tuple , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Dict ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : Optional[int] , *UpperCamelCase__ : str , **UpperCamelCase__ : str ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : List[Any] , *UpperCamelCase__ : Dict , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : int , *UpperCamelCase__ : int , **UpperCamelCase__ : List[str] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : Optional[int] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : str ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : List[Any] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Any ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
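# These placeholder classes make `from transformers import <tokenizer>` succeed even
# when sentencepiece is not installed; instantiating any of them triggers
# `requires_backends`, which raises an ImportError explaining how to install the
# missing backend.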
| 702
|
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = FunnelTokenizer
UpperCamelCase_ = FunnelTokenizerFast
UpperCamelCase_ = True
UpperCamelCase_ = True
def __A ( self : Union[str, Any] ):
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE : Optional[Any] = [
'''<unk>''',
'''<cls>''',
'''<sep>''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __A ( self : int , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return FunnelTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self : int , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self : Any , UpperCamelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = '''UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE : int = '''unwanted, running'''
return input_text, output_text
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE : int = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(UpperCamelCase__ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [7, 4, 5, 10, 8, 9] )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_tokenizers(do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
SCREAMING_SNAKE_CASE : int = tokenizer('''UNwant\u00E9d,running''' )
SCREAMING_SNAKE_CASE : Optional[Any] = len(inputs['''input_ids'''] ) - 1
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer('''UNwant\u00E9d,running''' , '''UNwant\u00E9d,running''' )
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len + [1] * sentence_len )
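    # Note: the assertions above encode Funnel's convention that the CLS token gets its
    # own token type id (2), unlike BERT where CLS shares type id 0 with the first
    # sentence.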
| 34
| 0
|
from scipy.stats import pearsonr
import datasets
__UpperCamelCase : Tuple = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
__UpperCamelCase : Any = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
__UpperCamelCase : Tuple = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowercase__ ( datasets.Metric):
def __A ( self : List[str] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def __A ( self : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Dict=False ):
'''simple docstring'''
if return_pvalue:
SCREAMING_SNAKE_CASE : List[Any] = pearsonr(UpperCamelCase__ , UpperCamelCase__ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(UpperCamelCase__ , UpperCamelCase__ )[0] )}
| 703
|
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class lowercase__ ( UpperCamelCase_):
def __init__( self : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = dataset
SCREAMING_SNAKE_CASE : Optional[Any] = process
SCREAMING_SNAKE_CASE : Union[str, Any] = params
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : List[str] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.dataset[i]
SCREAMING_SNAKE_CASE : Optional[int] = self.process(UpperCamelCase__ , **self.params )
return processed
class lowercase__ ( UpperCamelCase_):
def __init__( self : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any]=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = loader
SCREAMING_SNAKE_CASE : List[Any] = infer
SCREAMING_SNAKE_CASE : int = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : List[str] = loader_batch_size
# Internal bookkeeping
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : int = None
def __len__( self : int ):
'''simple docstring'''
return len(self.loader )
def __iter__( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = iter(self.loader )
return self
def __A ( self : List[str] ):
'''simple docstring'''
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
SCREAMING_SNAKE_CASE : Optional[Any] = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
for k, element in self._loader_batch_data.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
# Convert ModelOutput to tuple first
SCREAMING_SNAKE_CASE : Dict = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE : Dict = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(UpperCamelCase__ , UpperCamelCase__ ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
SCREAMING_SNAKE_CASE : List[str] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
SCREAMING_SNAKE_CASE : int = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
SCREAMING_SNAKE_CASE : Union[str, Any] = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
SCREAMING_SNAKE_CASE : Tuple = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
SCREAMING_SNAKE_CASE : Tuple = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
SCREAMING_SNAKE_CASE : Any = self._loader_batch_data.__class__(UpperCamelCase__ )
self._loader_batch_index += 1
return result
def __A ( self : Union[str, Any] ):
'''simple docstring'''
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
SCREAMING_SNAKE_CASE : Tuple = next(self.iterator )
SCREAMING_SNAKE_CASE : List[Any] = self.infer(UpperCamelCase__ , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(UpperCamelCase__ , torch.Tensor ):
SCREAMING_SNAKE_CASE : Optional[int] = processed
else:
SCREAMING_SNAKE_CASE : int = list(processed.keys() )[0]
SCREAMING_SNAKE_CASE : int = processed[key]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : Dict = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
SCREAMING_SNAKE_CASE : List[Any] = observed_batch_size
# Setting internal index to unwrap the batch
SCREAMING_SNAKE_CASE : List[Any] = processed
SCREAMING_SNAKE_CASE : int = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class lowercase__ ( UpperCamelCase_):
def __init__( self : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any]=None ):
'''simple docstring'''
super().__init__(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __iter__( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = iter(self.loader )
SCREAMING_SNAKE_CASE : List[Any] = None
return self
def __A ( self : List[str] ):
'''simple docstring'''
if self.subiterator is None:
SCREAMING_SNAKE_CASE : Dict = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
SCREAMING_SNAKE_CASE : Any = next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item.
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and been fully iterated through.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
SCREAMING_SNAKE_CASE : Optional[Any] = self.infer(next(self.iterator ) , **self.params )
SCREAMING_SNAKE_CASE : Union[str, Any] = next(self.subiterator )
return processed
class lowercase__ ( UpperCamelCase_):
def __iter__( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = iter(self.loader )
return self
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Optional[int] = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
SCREAMING_SNAKE_CASE : Tuple = self.loader_batch_item()
SCREAMING_SNAKE_CASE : Any = item.pop('''is_last''' )
accumulator.append(UpperCamelCase__ )
if is_last:
return accumulator
while not is_last:
SCREAMING_SNAKE_CASE : Any = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(UpperCamelCase__ , torch.Tensor ):
SCREAMING_SNAKE_CASE : Tuple = processed
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = list(processed.keys() )[0]
SCREAMING_SNAKE_CASE : List[str] = processed[key]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : int = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
SCREAMING_SNAKE_CASE : List[str] = observed_batch_size
SCREAMING_SNAKE_CASE : List[Any] = processed
SCREAMING_SNAKE_CASE : str = 0
while self._loader_batch_index < self.loader_batch_size:
SCREAMING_SNAKE_CASE : Any = self.loader_batch_item()
SCREAMING_SNAKE_CASE : List[Any] = item.pop('''is_last''' )
accumulator.append(UpperCamelCase__ )
if is_last:
return accumulator
else:
SCREAMING_SNAKE_CASE : int = processed
SCREAMING_SNAKE_CASE : List[str] = item.pop('''is_last''' )
accumulator.append(UpperCamelCase__ )
return accumulator
class lowercase__ ( UpperCamelCase_):
def __init__( self : Optional[Any] , UpperCamelCase__ : Dataset , UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = dataset
SCREAMING_SNAKE_CASE : Dict = key
def __len__( self : Optional[int] ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : Dict , UpperCamelCase__ : Tuple ):
'''simple docstring'''
return self.dataset[i][self.key]
class lowercase__ ( UpperCamelCase_):
def __init__( self : List[Any] , UpperCamelCase__ : Dataset , UpperCamelCase__ : str , UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = dataset
SCREAMING_SNAKE_CASE : List[str] = keya
SCREAMING_SNAKE_CASE : Tuple = keya
def __len__( self : List[str] ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : Union[str, Any] , UpperCamelCase__ : Any ):
'''simple docstring'''
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 34
| 0
|
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class lowercase__ ( UpperCamelCase_ , UpperCamelCase_):
UpperCamelCase_ = 1
@register_to_config
def __init__( self : List[str] , UpperCamelCase__ : int = 1000 , UpperCamelCase__ : Optional[Union[np.ndarray, List[float]]] = None ):
'''simple docstring'''
self.set_timesteps(UpperCamelCase__ )
# standard deviation of the initial noise distribution
SCREAMING_SNAKE_CASE : str = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
SCREAMING_SNAKE_CASE : Tuple = 4
# running values
SCREAMING_SNAKE_CASE : int = []
def __A ( self : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, torch.device] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = num_inference_steps
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
SCREAMING_SNAKE_CASE : Tuple = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
SCREAMING_SNAKE_CASE : int = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = torch.sin(steps * math.pi / 2 ) ** 2
SCREAMING_SNAKE_CASE : Dict = (1.0 - self.betas**2) ** 0.5
        SCREAMING_SNAKE_CASE : Optional[Any] = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
SCREAMING_SNAKE_CASE : List[str] = timesteps.to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = []
def __A ( self : Tuple , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True , ):
'''simple docstring'''
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
SCREAMING_SNAKE_CASE : Optional[int] = (self.timesteps == timestep).nonzero().item()
SCREAMING_SNAKE_CASE : Union[str, Any] = timestep_index + 1
SCREAMING_SNAKE_CASE : int = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(UpperCamelCase__ )
if len(self.ets ) == 1:
SCREAMING_SNAKE_CASE : Dict = self.ets[-1]
elif len(self.ets ) == 2:
SCREAMING_SNAKE_CASE : Optional[int] = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
SCREAMING_SNAKE_CASE : str = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
SCREAMING_SNAKE_CASE : Optional[Any] = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
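        # The branches above are the explicit Adams-Bashforth linear multistep
        # coefficients of orders 1-4 (order 4: (55, -59, 37, -9) / 24), applied to the
        # running history of transformed model outputs in `self.ets`.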
SCREAMING_SNAKE_CASE : Optional[int] = self._get_prev_sample(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase__ )
def __A ( self : Optional[Any] , UpperCamelCase__ : torch.FloatTensor , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
return sample
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.alphas[timestep_index]
SCREAMING_SNAKE_CASE : List[str] = self.betas[timestep_index]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.alphas[prev_timestep_index]
SCREAMING_SNAKE_CASE : Tuple = self.betas[prev_timestep_index]
SCREAMING_SNAKE_CASE : Dict = (sample - sigma * ets) / max(UpperCamelCase__ , 1E-8 )
SCREAMING_SNAKE_CASE : Optional[Any] = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self : int ):
'''simple docstring'''
return self.config.num_train_timesteps
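    # A minimal stepping sketch (hedged: the class above appears to be diffusers'
    # IPNDMScheduler; `unet` and `sample` are hypothetical stand-ins):
    #
    #   scheduler.set_timesteps(50)
    #   for t in scheduler.timesteps:
    #       model_output = unet(sample, t).sample
    #       sample = scheduler.step(model_output, t, sample).prev_sample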
| 704
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : Optional[Any] = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """deberta-v2"""
def __init__( self : Optional[Any] , UpperCamelCase__ : Any=12_8100 , UpperCamelCase__ : Optional[int]=1536 , UpperCamelCase__ : Dict=24 , UpperCamelCase__ : List[str]=24 , UpperCamelCase__ : Tuple=6144 , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Optional[int]=512 , UpperCamelCase__ : Optional[Any]=0 , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : List[Any]=1E-7 , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : str=-1 , UpperCamelCase__ : List[Any]=0 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Tuple=0 , UpperCamelCase__ : str="gelu" , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : int = num_hidden_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Any = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : Dict = type_vocab_size
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Optional[Any] = relative_attention
SCREAMING_SNAKE_CASE : Optional[Any] = max_relative_positions
SCREAMING_SNAKE_CASE : Optional[int] = pad_token_id
SCREAMING_SNAKE_CASE : Optional[Any] = position_biased_input
# Backwards compatibility
if type(UpperCamelCase__ ) == str:
SCREAMING_SNAKE_CASE : Optional[int] = [x.strip() for x in pos_att_type.lower().split('''|''' )]
SCREAMING_SNAKE_CASE : Any = pos_att_type
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE : Tuple = kwargs.get('''pooler_hidden_size''' , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = pooler_dropout
SCREAMING_SNAKE_CASE : Union[str, Any] = pooler_hidden_act
class lowercase__ ( UpperCamelCase_):
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Optional[int] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis)] )
else:
return OrderedDict([('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis)] )
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return 12
def __A ( self : Dict , UpperCamelCase__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional["TensorType"] = None , UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : "PreTrainedTokenizerBase" = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = super().generate_dummy_inputs(preprocessor=UpperCamelCase__ , framework=UpperCamelCase__ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
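    # A minimal usage sketch (hedged: assumes the config class above is registered as
    # model_type "deberta-v2" and its defaults match microsoft/deberta-v2-xlarge):
    #
    #   from transformers import AutoConfig
    #   config = AutoConfig.from_pretrained("microsoft/deberta-v2-xlarge")
    #   # config.hidden_size == 1536, config.num_hidden_layers == 24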
| 34
| 0
|
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainer's args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second has 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 times using --repeat-times 3, and the averaged results will be reported.
#
# By default it'll use the lowest result as the baseline (100%) and compare the rest to it,
# as can be seen in the table above, but you can also specify which combination should serve
# as the baseline, e.g., to switch to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
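# As a quick, hedged illustration of the variation expansion described above (a standalone
# sketch, not part of the tool itself):
#
#     import itertools
#     dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
#     variations = [" ".join(parts).strip() for parts in itertools.product(*dims)]
#     # -> ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
#     #     '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']
#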
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")
class Tee:
    """
    A helper class to tee print's output into a file.
    Usage:
    sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
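# Hedged note: with sys.stdout replaced by a Tee instance, every print() is
# mirrored to the console and appended to the file, with tqdm's carriage-return
# control lines stripped from the file copy so the log stays readable.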
def get_original_command(max_width=80, full_python_path=False):
    """Return the original command line string that can be replayed nicely, wrapped for the given width."""
    cmd = []
    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")
    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)
    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))
    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)
    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"
    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"
    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id, cmd, variation_key, variation, longest_variation_len, target_metric_key, report_metric_keys,
    repeat_times, output_dir, verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(metrics) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
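# A hedged, self-contained sketch of the diff_% computation performed by
# process_results above: pick a baseline value, then express each row's target
# metric as a rounded percentage change relative to it (data is illustrative).
def _diff_percent_sketch():
    data = {"variation": ["--tf32 0", "--tf32 1"], "train_samples_per_second": [285.11, 342.09]}
    df_sketch = pd.DataFrame(data)
    base = df_sketch.loc[df_sketch["variation"] == "--tf32 0", "train_samples_per_second"].item()
    df_sketch["diff_%"] = ((df_sketch["train_samples_per_second"] - base) / base * 100).round()
    return df_sketch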
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations", default=None, type=str, nargs="+", required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation", default=None, type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key", default=None, type=str, required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys", default="", type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument, e.g., 'train_loss train_samples'",
    )
    parser.add_argument(
        "--repeat-times", default=1, type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir", default="output_benchmark", type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose", default=False, action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1, cmd, variation_key, variation, longest_variation_len, args.target_metric_key,
                report_metric_keys, args.repeat_times, output_dir, args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
| 705
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : str = logging.get_logger(__name__)
def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer, num_labels=1_000, id2label=id2label, label2id=label2id,
    )

    return config
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
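# Hedged sanity notes for rename_key above (expected mappings derived directly
# from its replacement rules; the keys are illustrative):
#   "stem.conv.weight"      -> "bit.embedder.convolution.weight"
#   "blocks.0.conv1.weight" -> "bit.encoder.layers.0.conv1.weight"
#   "head.fc.weight"        -> "classifier.1.weight"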
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 34
| 0
|
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version('torch'))
def compare_versions(library_or_version, operation, requirement_version):
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"""`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}""")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))
def is_torch_version(operation, version):
    return compare_versions(torch_version, operation, version)
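# Hedged usage sketch for the helpers above (mirroring the upstream accelerate API):
#   compare_versions("torch", ">=", "1.12")  # check the installed version of any library
#   is_torch_version(">=", "1.12")           # same check with torch pre-filled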
| 706
|
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def A ( _lowercase , _lowercase="eval" ):
SCREAMING_SNAKE_CASE : Dict = os.path.join(_lowercase , f"""{split}_results.json""" )
if os.path.exists(_lowercase ):
with open(_lowercase , '''r''' ) as f:
return json.load(_lowercase )
raise ValueError(f"""can't find {path}""" )
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowercase__ ( UpperCamelCase_):
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Tuple = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_flax_glue.main()
SCREAMING_SNAKE_CASE : Union[str, Any] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : str = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_clm_flax.main()
SCREAMING_SNAKE_CASE : Dict = get_results(UpperCamelCase__ )
self.assertLess(result['''eval_perplexity'''] , 100 )
@slow
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Union[str, Any] = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_summarization_flax.main()
SCREAMING_SNAKE_CASE : Union[str, Any] = get_results(UpperCamelCase__ , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Dict = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_mlm_flax.main()
SCREAMING_SNAKE_CASE : List[Any] = get_results(UpperCamelCase__ )
self.assertLess(result['''eval_perplexity'''] , 42 )
@slow
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Union[str, Any] = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_t5_mlm_flax.main()
SCREAMING_SNAKE_CASE : Optional[int] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 7 if get_gpu_count() > 1 else 2
SCREAMING_SNAKE_CASE : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Any = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_flax_ner.main()
SCREAMING_SNAKE_CASE : List[str] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Union[str, Any] = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_qa.main()
SCREAMING_SNAKE_CASE : str = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_f1'''] , 30 )
self.assertGreaterEqual(result['''eval_exact'''] , 30 )
| 34
| 0
|
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowercase__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = IFPipeline
UpperCamelCase_ = TEXT_TO_IMAGE_PARAMS - {"""width""", """height""", """latents"""}
UpperCamelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase_ = PipelineTesterMixin.required_optional_params - {"""latents"""}
def __A ( self : Tuple ):
'''simple docstring'''
return self._get_dummy_components()
def __A ( self : int , UpperCamelCase__ : Dict , UpperCamelCase__ : int=0 ):
'''simple docstring'''
if str(UpperCamelCase__ ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : Dict = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __A ( self : List[str] ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def __A ( self : Any ):
'''simple docstring'''
super().test_save_load_float16(expected_max_diff=1E-1 )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __A ( self : List[Any] ):
'''simple docstring'''
self._test_save_load_local()
def __A ( self : List[str] ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __A ( self : Tuple ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase):
def __A ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' , variant='''fp16''' , torch_dtype=torch.float16 )
SCREAMING_SNAKE_CASE : str = IFSuperResolutionPipeline.from_pretrained(
'''DeepFloyd/IF-II-L-v1.0''' , variant='''fp16''' , torch_dtype=torch.float16 , text_encoder=None , tokenizer=None )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('''cuda''' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = pipe_a.encode_prompt('''anime turtle''' , device='''cuda''' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : str = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
SCREAMING_SNAKE_CASE : Optional[int] = IFImgaImgPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE : Optional[int] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
SCREAMING_SNAKE_CASE : Tuple = IFInpaintingPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE : Optional[int] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict ):
'''simple docstring'''
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , num_inference_steps=2 , generator=UpperCamelCase__ , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Optional[int] = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
SCREAMING_SNAKE_CASE : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Tuple = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str ):
'''simple docstring'''
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , num_inference_steps=2 , generator=UpperCamelCase__ , output_type='''np''' , )
SCREAMING_SNAKE_CASE : int = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE : Optional[int] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , original_image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Optional[int] = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : int = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , mask_image=UpperCamelCase__ , num_inference_steps=2 , generator=UpperCamelCase__ , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Any = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : str = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , mask_image=UpperCamelCase__ , original_image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Optional[Any] = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
def _start_torch_memory_measurement():
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
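# Hedged companion sketch: after _start_torch_memory_measurement() resets the
# CUDA counters, the tests above read the peak back via
# torch.cuda.max_memory_allocated(). A standalone helper could look like:
def _peak_memory_bytes(fn):
    if not torch.cuda.is_available():  # no-op without a GPU
        return None
    _start_torch_memory_measurement()
    fn()
    return torch.cuda.max_memory_allocated()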
| 707
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a nested list of random floats"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
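# Hedged note: floats_list above returns a shape[0] x shape[1] nested Python
# list with values uniform in [0, scale); a numpy equivalent would be
# np.random.default_rng(seed).random(shape) * scale.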
class lowercase__ ( unittest.TestCase):
def __init__( self : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str=7 , UpperCamelCase__ : Any=400 , UpperCamelCase__ : List[str]=2000 , UpperCamelCase__ : List[Any]=2048 , UpperCamelCase__ : Any=128 , UpperCamelCase__ : Union[str, Any]=1 , UpperCamelCase__ : List[Any]=512 , UpperCamelCase__ : str=30 , UpperCamelCase__ : Tuple=4_4100 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = parent
SCREAMING_SNAKE_CASE : str = batch_size
SCREAMING_SNAKE_CASE : str = min_seq_length
SCREAMING_SNAKE_CASE : Dict = max_seq_length
SCREAMING_SNAKE_CASE : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE : Optional[Any] = spectrogram_length
SCREAMING_SNAKE_CASE : Optional[int] = feature_size
SCREAMING_SNAKE_CASE : Tuple = num_audio_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = hop_length
SCREAMING_SNAKE_CASE : List[Any] = chunk_length
SCREAMING_SNAKE_CASE : str = sampling_rate
def __A ( self : Optional[Any] ):
'''simple docstring'''
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def __A ( self : Tuple , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : Optional[int]=False ):
'''simple docstring'''
def _flatten(UpperCamelCase__ : str ):
return list(itertools.chain(*UpperCamelCase__ ) )
if equal_length:
SCREAMING_SNAKE_CASE : List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE : int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE : Optional[Any] = [np.asarray(x ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = TvltFeatureExtractor
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = TvltFeatureExtractionTester(self )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(UpperCamelCase__ , '''spectrogram_length''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''feature_size''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''num_audio_channels''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''hop_length''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''chunk_length''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''sampling_rate''' ) )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract_first.save_pretrained(UpperCamelCase__ )[0]
check_json_file_has_correct_format(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = self.feature_extraction_class.from_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE : str = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE : Optional[int] = dict_first.pop('''mel_filters''' )
SCREAMING_SNAKE_CASE : Optional[int] = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Tuple = os.path.join(UpperCamelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = self.feature_extraction_class.from_json_file(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE : str = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE : int = dict_first.pop('''mel_filters''' )
SCREAMING_SNAKE_CASE : Any = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE : Optional[int] = [np.asarray(speech_input ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
SCREAMING_SNAKE_CASE : Optional[Any] = feature_extractor(UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
SCREAMING_SNAKE_CASE : List[str] = feature_extractor(
UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 , mask_audio=UpperCamelCase__ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE : Optional[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE : Dict = np.asarray(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = feature_extractor(UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def __A ( self : Optional[int] , UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE : Dict = ds.sort('''id''' ).select(range(UpperCamelCase__ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE : int = TvltFeatureExtractor()
SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(UpperCamelCase__ , return_tensors='''pt''' ).audio_values
self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
SCREAMING_SNAKE_CASE : str = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , UpperCamelCase__ , atol=1E-4 ) )
| 34
| 0
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCamelCase_)
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True})
UpperCamelCase_ = Features({"""audio""": Audio()})
UpperCamelCase_ = Features({"""transcription""": Value("""string""")})
UpperCamelCase_ = """audio"""
UpperCamelCase_ = """transcription"""
def __A ( self : Tuple , UpperCamelCase__ : List[str] ):
'''simple docstring'''
if self.audio_column not in features:
raise ValueError(f"""Column {self.audio_column} is not present in features.""" )
if not isinstance(features[self.audio_column] , Audio ):
raise ValueError(f"""Column {self.audio_column} is not an Audio type.""" )
SCREAMING_SNAKE_CASE : Optional[Any] = copy.deepcopy(self )
SCREAMING_SNAKE_CASE : Any = self.input_schema.copy()
SCREAMING_SNAKE_CASE : List[str] = features[self.audio_column]
SCREAMING_SNAKE_CASE : str = input_schema
return task_template
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return {self.audio_column: "audio", self.transcription_column: "transcription"}
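# Hedged usage sketch (in the upstream `datasets` library this template is
# `AutomaticSpeechRecognition`): aligning the template with a dataset's features
# swaps in that dataset's own Audio feature, e.g. one carrying a sampling rate:
_example_features = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})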
| 708
|
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class lowercase__ ( UpperCamelCase_ , UpperCamelCase_):
UpperCamelCase_ = 1
@register_to_config
def __init__( self : List[str] , UpperCamelCase__ : int = 1000 , UpperCamelCase__ : Optional[Union[np.ndarray, List[float]]] = None ):
'''simple docstring'''
self.set_timesteps(UpperCamelCase__ )
# standard deviation of the initial noise distribution
SCREAMING_SNAKE_CASE : str = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
SCREAMING_SNAKE_CASE : Tuple = 4
# running values
SCREAMING_SNAKE_CASE : int = []
def __A ( self : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, torch.device] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = num_inference_steps
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
SCREAMING_SNAKE_CASE : Tuple = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
SCREAMING_SNAKE_CASE : int = torch.tensor(self.config.trained_betas , dtype=torch.float32 )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = torch.sin(steps * math.pi / 2 ) ** 2
SCREAMING_SNAKE_CASE : Dict = (1.0 - self.betas**2) ** 0.5
SCREAMING_SNAKE_CASE : Optional[Any] = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
SCREAMING_SNAKE_CASE : List[str] = timesteps.to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = []
def __A ( self : Tuple , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True , ):
'''simple docstring'''
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
SCREAMING_SNAKE_CASE : Optional[int] = (self.timesteps == timestep).nonzero().item()
SCREAMING_SNAKE_CASE : Union[str, Any] = timestep_index + 1
SCREAMING_SNAKE_CASE : int = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(UpperCamelCase__ )
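        # Linear multistep (Adams-Bashforth) combination of the stored model
        # outputs below, falling back to lower order until 4 outputs exist:
        #   order 1: e1                    order 2: (3*e1 - e2) / 2
        #   order 3: (23*e1 - 16*e2 + 5*e3) / 12
        #   order 4: (55*e1 - 59*e2 + 37*e3 - 9*e4) / 24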
if len(self.ets ) == 1:
SCREAMING_SNAKE_CASE : Dict = self.ets[-1]
elif len(self.ets ) == 2:
SCREAMING_SNAKE_CASE : Optional[int] = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
SCREAMING_SNAKE_CASE : str = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
SCREAMING_SNAKE_CASE : Optional[Any] = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
SCREAMING_SNAKE_CASE : Optional[int] = self._get_prev_sample(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase__ )
def __A ( self : Optional[Any] , UpperCamelCase__ : torch.FloatTensor , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
return sample
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.alphas[timestep_index]
SCREAMING_SNAKE_CASE : List[str] = self.betas[timestep_index]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.alphas[prev_timestep_index]
SCREAMING_SNAKE_CASE : Tuple = self.betas[prev_timestep_index]
SCREAMING_SNAKE_CASE : Dict = (sample - sigma * ets) / max(UpperCamelCase__ , 1E-8 )
SCREAMING_SNAKE_CASE : Optional[Any] = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self : int ):
'''simple docstring'''
return self.config.num_train_timesteps
| 34
| 0
|
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    # Element-wise logistic sigmoid.
    return 1 / (1 + np.exp(-vector))
def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    # GELU in its sigmoid ("SiLU"-style) approximation: x * sigmoid(1.702 * x).
    return vector * sigmoid(1.702 * vector)
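# Hedged reference implementation (added for comparison): the exact GELU that
# the 1.702-sigmoid form above approximates is 0.5 * x * (1 + erf(x / sqrt(2))).
import math
def gaussian_error_linear_unit(vector: np.ndarray) -> np.ndarray:
    return 0.5 * vector * (1 + np.vectorize(math.erf)(vector / math.sqrt(2)))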
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709
|
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowercase__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = IFPipeline
UpperCamelCase_ = TEXT_TO_IMAGE_PARAMS - {"""width""", """height""", """latents"""}
UpperCamelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase_ = PipelineTesterMixin.required_optional_params - {"""latents"""}
def __A ( self : Tuple ):
'''simple docstring'''
return self._get_dummy_components()
def __A ( self : int , UpperCamelCase__ : Dict , UpperCamelCase__ : int=0 ):
'''simple docstring'''
if str(UpperCamelCase__ ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : Dict = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __A ( self : List[str] ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def __A ( self : Any ):
'''simple docstring'''
super().test_save_load_float16(expected_max_diff=1E-1 )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __A ( self : List[Any] ):
'''simple docstring'''
self._test_save_load_local()
def __A ( self : List[str] ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __A ( self : Tuple ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase):
def __A ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' , variant='''fp16''' , torch_dtype=torch.float16 )
SCREAMING_SNAKE_CASE : str = IFSuperResolutionPipeline.from_pretrained(
'''DeepFloyd/IF-II-L-v1.0''' , variant='''fp16''' , torch_dtype=torch.float16 , text_encoder=None , tokenizer=None )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('''cuda''' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = pipe_a.encode_prompt('''anime turtle''' , device='''cuda''' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : str = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
SCREAMING_SNAKE_CASE : Optional[int] = IFImgaImgPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE : Optional[int] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
SCREAMING_SNAKE_CASE : Tuple = IFInpaintingPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE : Optional[int] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, num_inference_steps=2, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy")
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, generator=generator, num_inference_steps=2, output_type="np", )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy")
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_imgaimg(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, num_inference_steps=2, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy")
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, original_image=original_image, generator=generator, num_inference_steps=2, output_type="np", )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy")
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, num_inference_steps=2, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy")
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, original_image=original_image, generator=generator, num_inference_steps=2, output_type="np", )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy")
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 34
| 0
|
def z_function(input_str: str) -> list[int]:
    z_result = [0 for i in range(len(input_str))]
    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge
        while go_next(i, z_result, input_str):
            z_result[i] += 1
        # if the new index's result extends the right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result
def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]
def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with the concatenated string
    z_result = z_function(pattern + input_str)
    for val in z_result:
        # if the value is greater than or equal to the length of the pattern,
        # this index is the starting position of a substring equal to the pattern
        if val >= len(pattern):
            answer += 1
    return answer
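# Illustrative usage (assumed, not from the original file):
#
#     z_function("aaaa")                    # -> [0, 3, 2, 1]
#     find_pattern("abr", "abracadabra")    # -> 2 (matches at indices 0 and 7)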
if __name__ == "__main__":
import doctest
doctest.testmod()
| 710
|
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple):
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x
    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)
    return (new_height, new_width)
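# Worked example (hypothetical values): for an input of height 480 and width 640
# with output_size=384, keep_aspect_ratio=True and multiple=32, the scale closest
# to 1 is the height factor 384 / 480 = 0.8, so both sides use it and round to
# multiples of 32, giving (new_height, new_width) == (384, 512).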
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs, ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self, image: np.ndarray, size: Dict[str, int], keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""")
        output_size = get_resize_output_image_size(
            image, output_size=(size["height"], size["width"]), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of, )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale( self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize( self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess( self, images: ImageInput, do_resize: bool = None, size: int = None, keep_aspect_ratio: bool = None, ensure_multiple_of: int = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs, ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation( self, outputs, target_sizes: List[Tuple] = None ):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
| 34
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 711
|
import random
def _partition(data, pivot):
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater
def quick_select(items, index):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)
    # invalid input
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
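# Illustrative usage (assumed, not from the original file): selecting the
# median of an unsorted list.
#
#     items = [7, 2, 9, 4, 1]
#     quick_select(items, len(items) // 2)  # -> 4, the third-smallest value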
| 34
| 0
|
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
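# Rationale sketch (editorial, not from the original file): dataclasses reject
# mutable defaults such as [], so list_field wraps the default in a
# default_factory. Hypothetical usage inside a dataclass:
#
#     tags: List[str] = list_field(default=["a", "b"], metadata={"help": "tags"})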
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool
@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})
@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None
class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"
class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42
@dataclass
class EnumExample:
    foo: BasicEnum = "toto"
    def __post_init__(self):
        self.foo = BasicEnum(self.foo)
@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"
    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)
@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])
@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()
    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)
@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])
if is_python_no_less_than_3_10:
    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None
    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]
            self.assertEqual(xx, yy)
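    # Note (editorial, illustrative): make_choice_type_function maps the string
    # form of each choice back to its original value, e.g. it would convert the
    # string "42" into the int 42 for a choices list like ["titi", "toto", 42],
    # which is why choices are compared through the type function above.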
    def test_basic(self):
        parser = HfArgumentParser(BasicExample)
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)
        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = HfArgumentParser(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=42 , type=UpperCamelCase__ )
expected.add_argument('''--baz''' , default='''toto''' , type=UpperCamelCase__ , help='''help message''' )
self.argparsersEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=UpperCamelCase__ , default=UpperCamelCase__ , const=UpperCamelCase__ , nargs='''?''' )
expected.add_argument('''--baz''' , type=UpperCamelCase__ , default=UpperCamelCase__ , const=UpperCamelCase__ , nargs='''?''' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('''--no_baz''' , action='''store_false''' , default=UpperCamelCase__ , dest='''baz''' )
expected.add_argument('''--opt''' , type=UpperCamelCase__ , default=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(UpperCamelCase__ )
for dataclass_type in dataclass_types:
SCREAMING_SNAKE_CASE : Optional[Any] = HfArgumentParser(UpperCamelCase__ )
self.argparsersEqual(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args([] )
self.assertEqual(UpperCamelCase__ , Namespace(foo=UpperCamelCase__ , baz=UpperCamelCase__ , opt=UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Dict = parser.parse_args(['''--foo''', '''--no_baz'''] )
self.assertEqual(UpperCamelCase__ , Namespace(foo=UpperCamelCase__ , baz=UpperCamelCase__ , opt=UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args(['''--foo''', '''--baz'''] )
self.assertEqual(UpperCamelCase__ , Namespace(foo=UpperCamelCase__ , baz=UpperCamelCase__ , opt=UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
self.assertEqual(UpperCamelCase__ , Namespace(foo=UpperCamelCase__ , baz=UpperCamelCase__ , opt=UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : int = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
self.assertEqual(UpperCamelCase__ , Namespace(foo=UpperCamelCase__ , baz=UpperCamelCase__ , opt=UpperCamelCase__ ) )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = HfArgumentParser(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
SCREAMING_SNAKE_CASE : int = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
SCREAMING_SNAKE_CASE : int = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
SCREAMING_SNAKE_CASE : Dict = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
SCREAMING_SNAKE_CASE : Any = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def __A ( self : Dict ):
'''simple docstring'''
@dataclass
class lowercase__ :
UpperCamelCase_ = """toto"""
SCREAMING_SNAKE_CASE : Optional[Any] = HfArgumentParser(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
SCREAMING_SNAKE_CASE : str = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = HfArgumentParser(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=UpperCamelCase__ )
expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=UpperCamelCase__ )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=UpperCamelCase__ )
expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=UpperCamelCase__ )
self.argparsersEqual(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = parser.parse_args([] )
self.assertEqual(
UpperCamelCase__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
SCREAMING_SNAKE_CASE : Tuple = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
self.assertEqual(UpperCamelCase__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=UpperCamelCase__ , type=UpperCamelCase__ )
expected.add_argument('''--bar''' , default=UpperCamelCase__ , type=UpperCamelCase__ , help='''help message''' )
expected.add_argument('''--baz''' , default=UpperCamelCase__ , type=UpperCamelCase__ )
expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=UpperCamelCase__ )
expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(UpperCamelCase__ )
for dataclass_type in dataclass_types:
SCREAMING_SNAKE_CASE : str = HfArgumentParser(UpperCamelCase__ )
self.argparsersEqual(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args([] )
self.assertEqual(UpperCamelCase__ , Namespace(foo=UpperCamelCase__ , bar=UpperCamelCase__ , baz=UpperCamelCase__ , ces=[] , des=[] ) )
SCREAMING_SNAKE_CASE : Any = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
self.assertEqual(UpperCamelCase__ , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = HfArgumentParser(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
expected.add_argument('''--required_list''' , nargs='''+''' , type=UpperCamelCase__ , required=UpperCamelCase__ )
expected.add_argument('''--required_str''' , type=UpperCamelCase__ , required=UpperCamelCase__ )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=UpperCamelCase__ , )
self.argparsersEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = HfArgumentParser(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=UpperCamelCase__ , required=UpperCamelCase__ )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=UpperCamelCase__ , )
expected.add_argument('''--opt''' , type=UpperCamelCase__ , default=UpperCamelCase__ )
expected.add_argument('''--baz''' , default='''toto''' , type=UpperCamelCase__ , help='''help message''' )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=UpperCamelCase__ )
self.argparsersEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = HfArgumentParser(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
SCREAMING_SNAKE_CASE : List[Any] = parser.parse_dict(UpperCamelCase__ )[0]
SCREAMING_SNAKE_CASE : Optional[Any] = BasicExample(**UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = HfArgumentParser(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 42,
}
self.assertRaises(UpperCamelCase__ , parser.parse_dict , UpperCamelCase__ , allow_extra_keys=UpperCamelCase__ )
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = HfArgumentParser(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE : Any = os.path.join(UpperCamelCase__ , '''temp_json''' )
os.mkdir(UpperCamelCase__ )
with open(temp_local_path + '''.json''' , '''w+''' ) as f:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0]
SCREAMING_SNAKE_CASE : str = BasicExample(**UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = HfArgumentParser(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE : int = os.path.join(UpperCamelCase__ , '''temp_yaml''' )
os.mkdir(UpperCamelCase__ )
with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
yaml.dump(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
SCREAMING_SNAKE_CASE : List[Any] = BasicExample(**UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = HfArgumentParser(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
| 712
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"
    def __init__( self, vocab_size=None, mask_token_id=None, pad_token_id=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1026, initializer_range=0.02, layer_norm_eps=1e-12, position_embedding_type="absolute", use_cache=True, emb_layer_norm_before=None, token_dropout=False, is_folding_model=False, esmfold_config=None, vocab_list=None, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")
    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None
    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)
    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1_024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None
    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)
        if self.max_recycles <= 0:
            raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f""" {self.sequence_state_dim} and {self.sequence_head_width}.""")
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""")
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""")
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""")
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""")
        if self.dropout >= 0.4:
            raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""")
    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5
    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
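# Note (editorial, illustrative): this 33-entry tuple mirrors the ESM-2
# alphabet; the ordering matters because token ids are positions in the tuple.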
| 34
| 0
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
SPIECE_UNDERLINE = '▁'
class AlbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self, vocab_file, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ):
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
@property
    def vocab_size(self):
        return len(self.sp_model)
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text: str):
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)
    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 713
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""")) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""")
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
])
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split(), encoding="utf-8", check=True, )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count):
        job_name = f"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"""
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=job_name, instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path}, metric_definitions=self.env.metric_definitions, distribution=distribution, py_version="py36", )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""")
    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 99_9999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"""{estimator.latest_training_job.name}.json""", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 34
| 0
|
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)
model_dict = {'facebook/bart-base': BartForConditionalGeneration}
tokenizer_dict = {'facebook/bart-base': BartTokenizer}
def parse_args():
SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser(description='''Export Bart model + Beam Search to ONNX graph.''' )
parser.add_argument(
'''--validation_file''' , type=_lowercase , default=_lowercase , help='''A csv or a json file containing the validation data.''' )
parser.add_argument(
'''--max_length''' , type=_lowercase , default=5 , help='''The maximum total input sequence length after tokenization.''' , )
parser.add_argument(
'''--num_beams''' , type=_lowercase , default=_lowercase , help=(
'''Number of beams to use for evaluation. This argument will be '''
'''passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'''
) , )
parser.add_argument(
'''--model_name_or_path''' , type=_lowercase , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=_lowercase , )
parser.add_argument(
'''--config_name''' , type=_lowercase , default=_lowercase , help='''Pretrained config name or path if not the same as model_name''' , )
parser.add_argument(
'''--device''' , type=_lowercase , default='''cpu''' , help='''Device where the model will be run''' , )
parser.add_argument('''--output_file_path''' , type=_lowercase , default=_lowercase , help='''Where to store the final ONNX file.''' )
SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
return args
def A ( _lowercase , _lowercase="cpu" ):
SCREAMING_SNAKE_CASE : Dict = model_dict[model_name].from_pretrained(_lowercase ).to(_lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_dict[model_name].from_pretrained(_lowercase )
if model_name in ["facebook/bart-base"]:
SCREAMING_SNAKE_CASE : Dict = 0
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : int = 0
return huggingface_model, tokenizer
def A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
model.eval()
SCREAMING_SNAKE_CASE : Any = None
SCREAMING_SNAKE_CASE : Dict = torch.jit.script(BARTBeamSearchGenerator(_lowercase ) )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = '''My friends are cool but they eat too many carbs.'''
SCREAMING_SNAKE_CASE : Tuple = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_024 , return_tensors='''pt''' ).to(model.device )
SCREAMING_SNAKE_CASE : str = model.generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , num_beams=_lowercase , max_length=_lowercase , early_stopping=_lowercase , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
_lowercase , (
inputs['''input_ids'''],
inputs['''attention_mask'''],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , _lowercase , opset_version=14 , input_names=['''input_ids''', '''attention_mask''', '''num_beams''', '''max_length''', '''decoder_start_token_id'''] , output_names=['''output_ids'''] , dynamic_axes={
'''input_ids''': {0: '''batch''', 1: '''seq'''},
'''output_ids''': {0: '''batch''', 1: '''seq_out'''},
} , example_outputs=_lowercase , )
logger.info('''Model exported to {}'''.format(_lowercase ) )
SCREAMING_SNAKE_CASE : str = remove_dup_initializers(os.path.abspath(_lowercase ) )
logger.info('''Deduplicated and optimized model written to {}'''.format(_lowercase ) )
SCREAMING_SNAKE_CASE : int = onnxruntime.InferenceSession(_lowercase )
SCREAMING_SNAKE_CASE : int = ort_sess.run(
_lowercase , {
'''input_ids''': inputs['''input_ids'''].cpu().numpy(),
'''attention_mask''': inputs['''attention_mask'''].cpu().numpy(),
'''num_beams''': np.array(_lowercase ),
'''max_length''': np.array(_lowercase ),
'''decoder_start_token_id''': np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3 )
logger.info('''Model outputs from torch and ONNX Runtime are similar.''' )
logger.info('''Success.''' )
def A ( ):
SCREAMING_SNAKE_CASE : Any = parse_args()
SCREAMING_SNAKE_CASE : str = 5
SCREAMING_SNAKE_CASE : Optional[int] = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
SCREAMING_SNAKE_CASE : int = torch.device(args.device )
    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = load_model_tokenizer(args.model_name_or_path , _lowercase )
if model.config.decoder_start_token_id is None:
raise ValueError('''Make sure that `config.decoder_start_token_id` is correctly defined''' )
model.to(_lowercase )
if args.max_length:
SCREAMING_SNAKE_CASE : Optional[Any] = args.max_length
if args.num_beams:
SCREAMING_SNAKE_CASE : Dict = args.num_beams
if args.output_file_path:
SCREAMING_SNAKE_CASE : Optional[int] = args.output_file_path
else:
SCREAMING_SNAKE_CASE : Optional[Any] = '''BART.onnx'''
logger.info('''Exporting model to ONNX''' )
export_and_validate_model(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
if __name__ == "__main__":
main()
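# A hedged sketch of consuming the exported graph later without torch. The feed
# and output names mirror the input_names/output_names passed to torch.onnx.export
# above; the int64 dtypes and the example values (4 beams, max length 5, decoder
# start id 2) are illustrative assumptions, not guarantees of this script.
def run_exported_beam_search(onnx_path, input_ids, attention_mask):
    import numpy as np
    import onnxruntime
    sess = onnxruntime.InferenceSession(onnx_path)
    (output_ids,) = sess.run(
        ["output_ids"],
        {
            "input_ids": np.asarray(input_ids, dtype=np.int64),
            "attention_mask": np.asarray(attention_mask, dtype=np.int64),
            "num_beams": np.array(4, dtype=np.int64),
            "max_length": np.array(5, dtype=np.int64),
            "decoder_start_token_id": np.array(2, dtype=np.int64),
        },
    )
    return output_ids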
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__UpperCamelCase : Dict = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__UpperCamelCase : Tuple = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f"""{len(upper_files)} files contain uppercase characters:""")
print('\n'.join(upper_files) + '\n')
__UpperCamelCase : List[Any] = [file for file in filepaths if ' ' in file]
if space_files:
print(f"""{len(space_files)} files contain space characters:""")
print('\n'.join(space_files) + '\n')
__UpperCamelCase : List[Any] = [file for file in filepaths if '-' in file]
if hyphen_files:
print(f"""{len(hyphen_files)} files contain hyphen characters:""")
print('\n'.join(hyphen_files) + '\n')
__UpperCamelCase : List[Any] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f"""{len(nodir_files)} files are not in a directory:""")
print('\n'.join(nodir_files) + '\n')
__UpperCamelCase : Optional[Any] = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
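# The four checks above can be folded into one reusable predicate; this is an
# equivalent stand-alone sketch (the helper name is ours), with a self-check:
def bad_name_reasons(path, sep="/"):
    reasons = []
    if path != path.lower():
        reasons.append("uppercase")
    if " " in path:
        reasons.append("space")
    if "-" in path:
        reasons.append("hyphen")
    if sep not in path:
        reasons.append("no directory")
    return reasons
assert bad_name_reasons("My Dir/file-a.py") == ["uppercase", "space", "hyphen"]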
import re
import string
import numpy as np
import datasets
__UpperCamelCase : List[str] = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
__UpperCamelCase : Optional[Any] = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
__UpperCamelCase : Union[str, Any] = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowercase__ ( datasets.Metric):
def __A ( self : Optional[int] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , reference_urls=[] , )
def __A ( self : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int=None , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : Dict=False , UpperCamelCase__ : Union[str, Any]=False , ):
'''simple docstring'''
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
SCREAMING_SNAKE_CASE : int = np.array([re.sub(UpperCamelCase__ , '''''' , UpperCamelCase__ ) for x in predictions] )
SCREAMING_SNAKE_CASE : List[Any] = np.array([re.sub(UpperCamelCase__ , '''''' , UpperCamelCase__ ) for x in references] )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = np.asarray(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = np.asarray(UpperCamelCase__ )
if ignore_case:
SCREAMING_SNAKE_CASE : Dict = np.char.lower(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = np.char.lower(UpperCamelCase__ )
if ignore_punctuation:
SCREAMING_SNAKE_CASE : Optional[int] = string.punctuation.maketrans('''''' , '''''' , string.punctuation )
SCREAMING_SNAKE_CASE : List[str] = np.char.translate(UpperCamelCase__ , table=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = np.char.translate(UpperCamelCase__ , table=UpperCamelCase__ )
if ignore_numbers:
SCREAMING_SNAKE_CASE : Any = string.digits.maketrans('''''' , '''''' , string.digits )
SCREAMING_SNAKE_CASE : Tuple = np.char.translate(UpperCamelCase__ , table=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.char.translate(UpperCamelCase__ , table=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = predictions == references
return {"exact_match": np.mean(UpperCamelCase__ ) * 100}
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
__UpperCamelCase : Dict = None
__UpperCamelCase : Tuple = logging.get_logger(__name__)
__UpperCamelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__UpperCamelCase : Optional[int] = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
__UpperCamelCase : Union[str, Any] = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ["""input_ids""", """attention_mask"""]
UpperCamelCase_ = TaTokenizer
UpperCamelCase_ = []
def __init__( self : str , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : str="<unk>" , UpperCamelCase__ : Optional[int]="<pad>" , UpperCamelCase__ : Optional[Any]=100 , UpperCamelCase__ : List[Any]=None , **UpperCamelCase__ : str , ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
SCREAMING_SNAKE_CASE : List[str] = [f"""<extra_id_{i}>""" for i in range(UpperCamelCase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
SCREAMING_SNAKE_CASE : int = len(set(filter(lambda UpperCamelCase__ : bool('''extra_id_''' in str(UpperCamelCase__ ) ) , UpperCamelCase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , extra_ids=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : str = vocab_file
SCREAMING_SNAKE_CASE : int = False if not self.vocab_file else True
SCREAMING_SNAKE_CASE : str = extra_ids
@staticmethod
def __A ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
SCREAMING_SNAKE_CASE : List[str] = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f""" {pretrained_model_name_or_path} automatically truncating your input to"""
f""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
f""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , UpperCamelCase__ , )
return max_model_length
def __A ( self : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE : Any = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ):
copyfile(self.vocab_file , UpperCamelCase__ )
logger.info(f"""Copy vocab file to {out_vocab_file}""" )
return (out_vocab_file,)
def __A ( self : Optional[Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
        SCREAMING_SNAKE_CASE : Optional[int] = token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a
        else:
            SCREAMING_SNAKE_CASE : Tuple = token_ids_b + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_b
def __A ( self : Any , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.eos_token_id]
        if token_ids_b is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_b + eos ) * [0]
def __A ( self : Dict ):
'''simple docstring'''
return list(
set(filter(lambda UpperCamelCase__ : bool(re.search(r'''<extra_id_\d+>''' , UpperCamelCase__ ) ) is not None , self.additional_special_tokens ) ) )
def __A ( self : List[Any] ):
'''simple docstring'''
return [self.convert_tokens_to_ids(UpperCamelCase__ ) for token in self.get_sentinel_tokens()]
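# A quick stand-alone illustration of the `<extra_id_*>` sentinel convention the
# class above manages, using the same f-string and regex patterns that appear in
# __init__ and get_sentinel_tokens:
import re as _re
_sentinels = [f"""<extra_id_{i}>""" for i in range(100)]
assert all(_re.search(r"<extra_id_\d+>", t) for t in _sentinels)
assert _sentinels[0] == "<extra_id_0>" and _sentinels[-1] == "<extra_id_99>"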
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class lowercase__ :
UpperCamelCase_ = None
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE : Any = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , UpperCamelCase__ )
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(UpperCamelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = self.feature_extraction_class.from_json_file(UpperCamelCase__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Dict = feat_extract_first.save_pretrained(UpperCamelCase__ )[0]
check_json_file_has_correct_format(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = self.feature_extraction_class.from_pretrained(UpperCamelCase__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.feature_extraction_class()
self.assertIsNotNone(UpperCamelCase__ )
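# The mixin above boils down to a JSON round-trip contract; the same idea in
# miniature, detached from any concrete feature extractor (all values here are
# illustrative stand-ins):
def _json_roundtrip_demo():
    import json, os, tempfile
    config = {"feature_size": 80, "sampling_rate": 16000}
    with tempfile.TemporaryDirectory() as tmpdirname:
        path = os.path.join(tmpdirname, "feat_extract.json")
        with open(path, "w") as f:
            json.dump(config, f)
        with open(path) as f:
            assert json.load(f) == config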
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__UpperCamelCase : str = False
class lowercase__ ( unittest.TestCase):
pass
@nightly
@require_torch_gpu
class lowercase__ ( unittest.TestCase):
def __A ( self : Union[str, Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = pipe.dual_guided(
prompt='''first prompt''' , image=UpperCamelCase__ , text_to_image_strength=0.75 , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = VersatileDiffusionPipeline.from_pretrained(UpperCamelCase__ , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = generator.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = pipe.dual_guided(
prompt='''first prompt''' , image=UpperCamelCase__ , text_to_image_strength=0.75 , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = '''cyberpunk 2077'''
SCREAMING_SNAKE_CASE : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = pipe.dual_guided(
prompt=UpperCamelCase__ , image=UpperCamelCase__ , text_to_image_strength=0.75 , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
SCREAMING_SNAKE_CASE : Tuple = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
SCREAMING_SNAKE_CASE : Optional[Any] = '''A painting of a squirrel eating a burger '''
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = pipe.text_to_image(
prompt=UpperCamelCase__ , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE : List[str] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Any = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
SCREAMING_SNAKE_CASE : Optional[Any] = pipe.image_variation(UpperCamelCase__ , generator=UpperCamelCase__ , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE : Optional[int] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
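# The pixel-slice assertions above all follow one pattern; a small generic
# helper expressing it (the helper name and default tolerance are ours):
import numpy as np
def _slice_close(image, expected_slice, tol=1e-1):
    actual = image[0, 253:256, 253:256, -1].flatten()
    return float(np.abs(actual - np.asarray(expected_slice)).max()) < tol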
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def A ( _lowercase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = analyze_text(_lowercase )
SCREAMING_SNAKE_CASE : Any = list(''' ''' + ascii_lowercase )
# what is our total sum of probabilities.
SCREAMING_SNAKE_CASE : Tuple = sum(single_char_strings.values() )
# one length string
SCREAMING_SNAKE_CASE : Tuple = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
SCREAMING_SNAKE_CASE : Tuple = single_char_strings[ch]
SCREAMING_SNAKE_CASE : List[str] = my_str / all_sum
            my_fir_sum += prob * math.loga(prob ) # entropy formula.
# print entropy
print(f"""{round(-1 * my_fir_sum ):.1f}""" )
# two len string
SCREAMING_SNAKE_CASE : Optional[Any] = sum(two_char_strings.values() )
SCREAMING_SNAKE_CASE : List[str] = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
        for chb in my_alphas:
            SCREAMING_SNAKE_CASE : Union[str, Any] = cha + chb
if sequence in two_char_strings:
SCREAMING_SNAKE_CASE : Any = two_char_strings[sequence]
                SCREAMING_SNAKE_CASE : Dict = int(my_str ) / all_sum
                my_sec_sum += prob * math.loga(prob )
# print second entropy
print(f"""{round(-1 * my_sec_sum ):.1f}""" )
# print the difference between them
print(f"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Tuple = Counter() # type: ignore
SCREAMING_SNAKE_CASE : Any = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(_lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def A ( ):
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
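# A worked micro-example of the first-order entropy computed above, for the
# alphabet {a, b} with counts {a: 3, b: 1}:
#   H = -(3/4)*log2(3/4) - (1/4)*log2(1/4) ≈ 0.811 bits
import math as _math
_counts = {"a": 3, "b": 1}
_total = sum(_counts.values())
_h = -sum((c / _total) * _math.log2(c / _total) for c in _counts.values())
assert abs(_h - 0.8112781244591328) < 1e-12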
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__UpperCamelCase : List[Any] = logging.get_logger(__name__)
__UpperCamelCase : List[str] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
__UpperCamelCase : Optional[int] = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
__UpperCamelCase : str = {'facebook/blenderbot-3B': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def A ( ):
SCREAMING_SNAKE_CASE : Optional[Any] = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
SCREAMING_SNAKE_CASE : Optional[Any] = bs[:]
SCREAMING_SNAKE_CASE : int = 0
for b in range(2**8 ):
if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    SCREAMING_SNAKE_CASE : Optional[Any] = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Optional[Any] = set()
SCREAMING_SNAKE_CASE : int = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE : int = char
return pairs
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ["""input_ids""", """attention_mask"""]
def __init__( self : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any]="replace" , UpperCamelCase__ : Any="<s>" , UpperCamelCase__ : Optional[int]="</s>" , UpperCamelCase__ : int="</s>" , UpperCamelCase__ : Any="<s>" , UpperCamelCase__ : str="<unk>" , UpperCamelCase__ : str="<pad>" , UpperCamelCase__ : int="<mask>" , UpperCamelCase__ : List[str]=False , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else bos_token
SCREAMING_SNAKE_CASE : Dict = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else eos_token
SCREAMING_SNAKE_CASE : Optional[int] = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else sep_token
SCREAMING_SNAKE_CASE : str = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else cls_token
SCREAMING_SNAKE_CASE : Any = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else unk_token
SCREAMING_SNAKE_CASE : Optional[Any] = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE : Tuple = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
super().__init__(
errors=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , **UpperCamelCase__ , )
with open(UpperCamelCase__ , encoding='''utf-8''' ) as vocab_handle:
SCREAMING_SNAKE_CASE : Optional[int] = json.load(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE : Union[str, Any] = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE : Union[str, Any] = bytes_to_unicode()
SCREAMING_SNAKE_CASE : int = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCamelCase__ , encoding='''utf-8''' ) as merges_handle:
SCREAMING_SNAKE_CASE : Optional[int] = merges_handle.read().split('''\n''' )[1:-1]
SCREAMING_SNAKE_CASE : Optional[int] = [tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE : str = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
SCREAMING_SNAKE_CASE : Dict = {}
SCREAMING_SNAKE_CASE : Optional[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE : str = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return len(self.encoder )
def __A ( self : Dict ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def __A ( self : int , UpperCamelCase__ : str ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE : str = tuple(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = get_pairs(UpperCamelCase__ )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE : Dict = min(UpperCamelCase__ , key=lambda UpperCamelCase__ : self.bpe_ranks.get(UpperCamelCase__ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE : Tuple = bigram
SCREAMING_SNAKE_CASE : Optional[int] = []
SCREAMING_SNAKE_CASE : str = 0
while i < len(UpperCamelCase__ ):
try:
SCREAMING_SNAKE_CASE : Optional[int] = word.index(UpperCamelCase__ , UpperCamelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE : List[str] = j
if word[i] == first and i < len(UpperCamelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE : Optional[int] = tuple(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = new_word
if len(UpperCamelCase__ ) == 1:
break
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = get_pairs(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = ''' '''.join(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = word
return word
def __A ( self : List[Any] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = []
for token in re.findall(self.pat , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Dict = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase__ ).split(''' ''' ) )
return bpe_tokens
def __A ( self : Dict , UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
return self.encoder.get(UpperCamelCase__ , self.encoder.get(self.unk_token ) )
def __A ( self : Optional[int] , UpperCamelCase__ : str ):
'''simple docstring'''
return self.decoder.get(UpperCamelCase__ )
def __A ( self : Optional[int] , UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = ''''''.join(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def __A ( self : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE : Dict = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase__ , ensure_ascii=UpperCamelCase__ ) + '''\n''' )
SCREAMING_SNAKE_CASE : Optional[int] = 0
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
SCREAMING_SNAKE_CASE : List[str] = token_index
writer.write(''' '''.join(UpperCamelCase__ ) + '''\n''' )
index += 1
return vocab_file, merge_file
def __A ( self : Any , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b )) + [1]
def __A ( self : Tuple , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = [self.sep_token_id]
SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
def __A ( self : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int=False , **UpperCamelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase__ ) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE : str = ''' ''' + text
return (text, kwargs)
def __A ( self : Optional[Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
return token_ids_a + [self.eos_token_id]
def __A ( self : List[Any] , UpperCamelCase__ : "Conversation" ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = ''' '''.join(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = self.encode(UpperCamelCase__ )
if len(UpperCamelCase__ ) > self.model_max_length:
SCREAMING_SNAKE_CASE : Union[str, Any] = input_ids[-self.model_max_length :]
logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
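# A compact stand-alone restatement of the symbol-pair extraction that drives
# the BPE merge loop above, with a sanity check:
def _toy_get_pairs(word):
    return set(zip(word, word[1:]))
assert _toy_get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}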
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__UpperCamelCase : Tuple = {
'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'],
'tokenization_ctrl': ['CTRLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = [
'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'CTRLForSequenceClassification',
'CTRLLMHeadModel',
'CTRLModel',
'CTRLPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCTRLForSequenceClassification',
'TFCTRLLMHeadModel',
'TFCTRLModel',
'TFCTRLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
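# The _LazyModule indirection above defers heavy imports until first attribute
# access; a minimal stand-in capturing the idea. The class name and mapping
# shape are ours, not the real transformers implementation.
import importlib
class _TinyLazyModule:
    def __init__(self, attr_to_module):
        self._attr_to_module = attr_to_module  # e.g. {"CTRLConfig": "transformers"}
    def __getattr__(self, name):
        # Import happens only on first access; unknown names raise KeyError here
        # (the real implementation raises AttributeError instead).
        module = importlib.import_module(self._attr_to_module[name])
        return getattr(module, name)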
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : Optional[Any] = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """deberta-v2"""
def __init__( self : Optional[Any] , UpperCamelCase__ : Any=12_8100 , UpperCamelCase__ : Optional[int]=1536 , UpperCamelCase__ : Dict=24 , UpperCamelCase__ : List[str]=24 , UpperCamelCase__ : Tuple=6144 , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Optional[int]=512 , UpperCamelCase__ : Optional[Any]=0 , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : List[Any]=1E-7 , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : str=-1 , UpperCamelCase__ : List[Any]=0 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Tuple=0 , UpperCamelCase__ : str="gelu" , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : int = num_hidden_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Any = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : Dict = type_vocab_size
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Optional[Any] = relative_attention
SCREAMING_SNAKE_CASE : Optional[Any] = max_relative_positions
SCREAMING_SNAKE_CASE : Optional[int] = pad_token_id
SCREAMING_SNAKE_CASE : Optional[Any] = position_biased_input
# Backwards compatibility
        if isinstance(UpperCamelCase__ , str ):
SCREAMING_SNAKE_CASE : Optional[int] = [x.strip() for x in pos_att_type.lower().split('''|''' )]
SCREAMING_SNAKE_CASE : Any = pos_att_type
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE : Tuple = kwargs.get('''pooler_hidden_size''' , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = pooler_dropout
SCREAMING_SNAKE_CASE : Union[str, Any] = pooler_hidden_act
class lowercase__ ( UpperCamelCase_):
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Optional[int] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis)] )
else:
return OrderedDict([('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis)] )
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return 12
def __A ( self : Dict , UpperCamelCase__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional["TensorType"] = None , UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : "PreTrainedTokenizerBase" = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = super().generate_dummy_inputs(preprocessor=UpperCamelCase__ , framework=UpperCamelCase__ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
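# The inputs mapping produced above, spelled out concretely for the
# non-multiple-choice branch with token_type_ids enabled:
from collections import OrderedDict as _OrderedDict
_dynamic_axis = {0: "batch", 1: "sequence"}
_example_inputs = _OrderedDict(
    [("input_ids", _dynamic_axis), ("attention_mask", _dynamic_axis), ("token_type_ids", _dynamic_axis)]
)
assert list(_example_inputs) == ["input_ids", "attention_mask", "token_type_ids"]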
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCamelCase : Tuple = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = ['MaskFormerFeatureExtractor']
__UpperCamelCase : List[Any] = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
__UpperCamelCase : Union[str, Any] = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure)
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
__UpperCamelCase : str = logging.get_logger(__name__)
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = ["""audio_values""", """audio_mask"""]
def __init__( self : Optional[Any] , UpperCamelCase__ : List[str]=2048 , UpperCamelCase__ : List[Any]=1 , UpperCamelCase__ : str=[16, 16] , UpperCamelCase__ : List[str]=128 , UpperCamelCase__ : Union[str, Any]=4_4100 , UpperCamelCase__ : Optional[int]=86 , UpperCamelCase__ : Dict=2048 , UpperCamelCase__ : Any=0.0 , **UpperCamelCase__ : List[Any] , ):
'''simple docstring'''
super().__init__(
feature_size=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , padding_value=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : str = spectrogram_length
SCREAMING_SNAKE_CASE : Optional[int] = num_channels
SCREAMING_SNAKE_CASE : List[Any] = patch_size
SCREAMING_SNAKE_CASE : str = feature_size // self.patch_size[1]
SCREAMING_SNAKE_CASE : Any = n_fft
SCREAMING_SNAKE_CASE : List[str] = sampling_rate // hop_length_to_sampling_rate
SCREAMING_SNAKE_CASE : List[str] = sampling_rate
SCREAMING_SNAKE_CASE : Dict = padding_value
SCREAMING_SNAKE_CASE : Optional[int] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=UpperCamelCase__ , min_frequency=0.0 , max_frequency=2_2050.0 , sampling_rate=UpperCamelCase__ , norm='''slaney''' , mel_scale='''slaney''' , ).T
def __A ( self : str , UpperCamelCase__ : np.array ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = spectrogram(
UpperCamelCase__ , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=80.0 , )
SCREAMING_SNAKE_CASE : Any = log_spec[:, :-1]
SCREAMING_SNAKE_CASE : List[Any] = log_spec - 20.0
SCREAMING_SNAKE_CASE : int = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self : Optional[int] , UpperCamelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Optional[bool] = True , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , **UpperCamelCase__ : List[str] , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'''This feature extractor is set to support sampling rate'''
f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
f""" with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
SCREAMING_SNAKE_CASE : Optional[Any] = isinstance(UpperCamelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
SCREAMING_SNAKE_CASE : List[str] = is_batched_numpy or (
isinstance(UpperCamelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE : List[Any] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(UpperCamelCase__ , np.ndarray ):
SCREAMING_SNAKE_CASE : Any = np.asarray(UpperCamelCase__ , dtype=np.floataa )
elif isinstance(UpperCamelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE : Any = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE : Dict = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
SCREAMING_SNAKE_CASE : List[str] = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Tuple = [np.asarray(UpperCamelCase__ , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
SCREAMING_SNAKE_CASE : Tuple = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
SCREAMING_SNAKE_CASE : int = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(UpperCamelCase__ ).astype(np.floataa )
# convert into correct format for padding
SCREAMING_SNAKE_CASE : Optional[int] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
SCREAMING_SNAKE_CASE : List[str] = np.ones([len(UpperCamelCase__ ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
SCREAMING_SNAKE_CASE : Any = padded_audio_features * self.padding_value
for i in range(len(UpperCamelCase__ ) ):
SCREAMING_SNAKE_CASE : Optional[Any] = audio_features[i]
SCREAMING_SNAKE_CASE : Optional[int] = feature
# return as BatchFeature
if return_attention_mask:
SCREAMING_SNAKE_CASE : Optional[Any] = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask}
else:
SCREAMING_SNAKE_CASE : Dict = {'''audio_values''': padded_audio_features}
SCREAMING_SNAKE_CASE : Optional[int] = BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
return encoded_inputs
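# A stand-alone sketch of the pad-and-mask step above: variable-length features
# are zero-padded to a shared length and a 1/0 mask marks the valid region
# (toy shapes only, without the patch-size arithmetic):
import numpy as np
_feats = [np.ones((5, 4), dtype=np.float32), np.ones((3, 4), dtype=np.float32)]
_max_len = max(f.shape[0] for f in _feats)
_padded = np.zeros((len(_feats), _max_len, 4), dtype=np.float32)
_mask = np.zeros((len(_feats), _max_len), dtype=np.float32)
for _i, _f in enumerate(_feats):
    _padded[_i, : _f.shape[0]] = _f
    _mask[_i, : _f.shape[0]] = 1.0
assert _mask.sum() == 8.0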
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
__UpperCamelCase : Dict = [
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def A ( _lowercase , _lowercase=None , _lowercase=None , _lowercase=None ):
SCREAMING_SNAKE_CASE : Union[str, Any] = True
while ask_again:
SCREAMING_SNAKE_CASE : Optional[Any] = input(_lowercase )
try:
if default is not None and len(_lowercase ) == 0:
return default
return convert_value(_lowercase ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(_lowercase )
def A ( _lowercase , _lowercase=[] , _lowercase=None , _lowercase=0 ):
SCREAMING_SNAKE_CASE : Dict = BulletMenu(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE : str = menu.run(default_choice=_lowercase )
return convert_value(_lowercase ) if convert_value is not None else result
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Dict = int(_lowercase )
return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Any = int(_lowercase )
return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = int(_lowercase )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = int(_lowercase )
return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Dict = int(_lowercase )
return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] )
def A ( _lowercase ):
return {"yes": True, "no": False}[value.lower()]
class lowercase__ ( argparse.RawDescriptionHelpFormatter):
def __A ( self : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = super()._format_usage(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = usage.replace('''<command> [<args>] ''' , '''''' )
return usage
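# The yes/no converter above reduces to a case-insensitive dict lookup; the same
# contract as a tiny stand-alone helper (raises KeyError on any other answer,
# mirroring the original):
def _to_bool(answer):
    return {"yes": True, "no": False}[answer.lower()]
assert _to_bool("Yes") is True and _to_bool("no") is False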
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class lowercase__ :
UpperCamelCase_ = 42
UpperCamelCase_ = 42
class lowercase__ :
def __init__( self : int , UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : list[list[Edge]] = [[] for _ in range(UpperCamelCase__ )]
SCREAMING_SNAKE_CASE : Union[str, Any] = size
def __getitem__( self : Optional[Any] , UpperCamelCase__ : int ):
'''simple docstring'''
return iter(self._graph[vertex] )
@property
def __A ( self : Dict ):
'''simple docstring'''
return self._size
def __A ( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int ):
'''simple docstring'''
if weight not in (0, 1):
raise ValueError('''Edge weight must be either 0 or 1.''' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('''Vertex indexes must be in [0; size).''' )
self._graph[from_vertex].append(Edge(UpperCamelCase__ , UpperCamelCase__ ) )
    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        """0-1 BFS: weight-0 edges go to the front of the deque, weight-1 edges to the back."""
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
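
    # Illustrative demo (not part of the original module; the graph below is
    # made up): shortest 0-1 weighted path in a small 5-vertex graph.
    demo_graph = AdjacencyList(5)
    demo_graph.add_edge(0, 1, 1)
    demo_graph.add_edge(0, 2, 0)
    demo_graph.add_edge(2, 3, 0)
    demo_graph.add_edge(3, 4, 1)
    demo_graph.add_edge(1, 4, 1)
    print(demo_graph.get_shortest_path(0, 4))  # 1, via 0 -> 2 -> 3 -> 4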
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True
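

def has_loop_floyd(head: Node) -> bool:
    """
    A sketch (not part of the original API): Floyd's tortoise-and-hare cycle
    detection, which uses O(1) extra space instead of the O(n) `visited` list
    kept by `Node.__iter__`.
    """
    slow = head
    fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node  # tortoise: one step
        fast = fast.next_node.next_node  # hare: two steps
        if slow is fast:  # the pointers can only meet inside a cycle
            return True
    return False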
if __name__ == "__main__":
__UpperCamelCase : List[Any] = Node(1)
__UpperCamelCase : str = Node(2)
__UpperCamelCase : Dict = Node(3)
__UpperCamelCase : List[Any] = Node(4)
print(root_node.has_loop) # False
__UpperCamelCase : int = root_node.next_node
print(root_node.has_loop) # True
__UpperCamelCase : Union[str, Any] = Node(5)
__UpperCamelCase : Union[str, Any] = Node(6)
__UpperCamelCase : List[Any] = Node(5)
__UpperCamelCase : List[str] = Node(6)
print(root_node.has_loop) # False
__UpperCamelCase : List[Any] = Node(1)
print(root_node.has_loop) # False
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """
    Args:
        predicted_image_embedding (`torch.FloatTensor` of shape `(batch_size, embedding_dim)`):
            The predicted CLIP image embedding conditioned on the CLIP text embedding input.
    """

    predicted_image_embedding: torch.FloatTensor
class PriorTransformer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)

        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        r"""
        Returns:
            `dict` of attention processors: A dictionary containing all attention processors used in the model,
            indexed by weight name.
        """
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
    def set_default_attn_processor(self):
        """
        Disables custom attention processors and sets the default attention implementation.
        """
        self.set_attn_processor(AttnProcessor())
    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]

        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(
            additional_embeds,
            dim=1,
        )

        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)
    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
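
# A hypothetical smoke test (kept as a comment; shapes follow the config
# defaults above, and the tensor values are random, not from any checkpoint):
#
#   model = PriorTransformer()
#   latents = torch.randn(2, 768)
#   proj = torch.randn(2, 768)
#   text_emb = torch.randn(2, 77, 768)
#   out = model(latents, timestep=1, proj_embedding=proj, encoder_hidden_states=text_emb)
#   assert out.predicted_image_embedding.shape == (2, 768)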
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--txt2img_unclip',
default='kakaobrain/karlo-v1-alpha',
type=str,
required=False,
help='The pretrained txt2img unclip.',
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
SCREAMING_SNAKE_CASE__ = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
SCREAMING_SNAKE_CASE__ = CLIPImageProcessor()
SCREAMING_SNAKE_CASE__ = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')
SCREAMING_SNAKE_CASE__ = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
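
    # Example invocation (the script filename and output path are illustrative):
    #   python convert_unclip_txt2img_to_image_variation.py \
    #       --txt2img_unclip kakaobrain/karlo-v1-alpha \
    #       --dump_path ./unclip-image-variation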