import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}


class BertGenerationTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for BERT sequence-generation checkpoints."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # sp_model is a C-extension object and cannot be pickled directly
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string into a list of SentencePiece pieces."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # the loaded model was not backed by a file on disk; serialize it instead
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
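# A minimal usage sketch (not part of the original file). The vocab path is
# illustrative only; any trained SentencePiece model file would do.
#
#     tokenizer = BertGenerationTokenizer(vocab_file="spiece.model")
#     ids = tokenizer("Hello world")["input_ids"]
#     text = tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(ids))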
def power(base: int, exponent: int) -> float:
    """Recursively compute base ** exponent for a non-negative exponent."""
    return base * power(base, exponent - 1) if exponent else 1


if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f"{base} to the power of {exponent} is {result}")
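# A hedged alternative sketch (not part of the original module): handling
# negative exponents inside the recursion instead of via abs() at the call site.
def power_with_negatives(base: float, exponent: int) -> float:
    # For a negative exponent, b ** -n == 1 / (b ** n).
    if exponent < 0:
        return 1 / power_with_negatives(base, -exponent)
    return base * power_with_negatives(base, exponent - 1) if exponent else 1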
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : List[Any] = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def _UpperCAmelCase ( self , a=0 ) -> Optional[Any]:
lowercase__ : Optional[int] = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(a ) )
lowercase__ : List[Any] = np.random.RandomState(a )
lowercase__ : int = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Tuple = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=a )
lowercase__ : int = self.get_dummy_inputs()
lowercase__ : Optional[Any] = pipe(**a ).images
lowercase__ : int = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 1_2_8, 1_2_8, 3)
lowercase__ : Union[str, Any] = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Union[str, Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowercase__ : Optional[Any] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=a )
pipe.set_progress_bar_config(disable=a )
lowercase__ : List[Any] = self.get_dummy_inputs()
lowercase__ : str = pipe(**a ).images
lowercase__ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
lowercase__ : Union[str, Any] = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _UpperCAmelCase ( self ) -> str:
lowercase__ : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowercase__ : int = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a )
# warmup pass to apply optimizations
lowercase__ : str = pipe(**self.get_dummy_inputs() )
lowercase__ : Optional[int] = self.get_dummy_inputs()
lowercase__ : int = pipe(**a ).images
lowercase__ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
lowercase__ : str = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Union[str, Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowercase__ : Optional[int] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a )
lowercase__ : List[str] = self.get_dummy_inputs()
lowercase__ : Tuple = pipe(**a ).images
lowercase__ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
lowercase__ : List[Any] = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowercase__ : Optional[Any] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a )
lowercase__ : str = self.get_dummy_inputs()
lowercase__ : int = pipe(**a ).images
lowercase__ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
lowercase__ : Optional[int] = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Tuple = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowercase__ : Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a )
lowercase__ : Optional[Any] = self.get_dummy_inputs()
lowercase__ : Tuple = pipe(**a ).images
lowercase__ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
lowercase__ : List[Any] = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
@property
def _UpperCAmelCase ( self ) -> int:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Tuple = ort.SessionOptions()
lowercase__ : List[Any] = False
return options
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
lowercase__ : Optional[int] = init_image.resize((7_6_8, 5_1_2) )
# using the PNDM scheduler by default
lowercase__ : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a )
lowercase__ : int = 'A fantasy landscape, trending on artstation'
lowercase__ : int = np.random.RandomState(0 )
lowercase__ : str = pipe(
prompt=a , image=a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=1_0 , generator=a , output_type='np' , )
lowercase__ : List[str] = output.images
lowercase__ : List[Any] = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 7_6_8, 3)
lowercase__ : Optional[Any] = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
lowercase__ : Optional[int] = init_image.resize((7_6_8, 5_1_2) )
lowercase__ : Optional[Any] = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
lowercase__ : Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=a , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a )
lowercase__ : str = 'A fantasy landscape, trending on artstation'
lowercase__ : Any = np.random.RandomState(0 )
lowercase__ : Optional[int] = pipe(
prompt=a , image=a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=2_0 , generator=a , output_type='np' , )
lowercase__ : str = output.images
lowercase__ : Tuple = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 7_6_8, 3)
lowercase__ : List[str] = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
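# The tests above pin execution providers explicitly ("CPUExecutionProvider",
# "CUDAExecutionProvider"). A small sketch for checking what your onnxruntime
# build actually offers (assumes onnxruntime is installed):
#
#     import onnxruntime as ort
#     print(ort.get_available_providers())
#     # e.g. ['CUDAExecutionProvider', 'CPUExecutionProvider']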
"""simple docstring"""
from __future__ import annotations
def a_ ( _lowerCAmelCase : float , _lowerCAmelCase : float , _lowerCAmelCase : float , ):
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
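# Usage sketch (not in the original file): pass exactly one zero for the
# unknown quantity; the function returns its name and computed value.
#
#     >>> shear_stress(stress=25, tangential_force=100, area=0)
#     ('area', 4.0)
#     >>> shear_stress(stress=0, tangential_force=1600, area=200)
#     ('stress', 8.0)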
# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


# NOTE: the original class names were lost when this file was scraped; the
# Flax* names below are representative placeholders for diffusers' flax dummy
# objects, not a verbatim reconstruction.
class FlaxControlNetModel(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxModelMixin(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxUNet2DConditionModel(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxAutoencoderKL(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDiffusionPipeline(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDDIMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDDPMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDPMSolverMultistepScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxKarrasVeScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxLMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxPNDMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxSchedulerMixin(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxScoreSdeVeScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
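# For context, a simplified sketch of the dummy-object mechanism used above
# (the real DummyObject/requires_backends live in diffusers.utils; this toy
# version only illustrates the idea and is not the library implementation):
#
#     class DummyObject(type):
#         # any attribute access on the class raises if the backend is missing
#         def __getattr__(cls, key):
#             requires_backends(cls, cls._backends)
#
#     def requires_backends(obj, backends):
#         raise ImportError(f"{obj} requires the following backends: {backends}")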
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}


class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with the Jieba segmentation tool. It is used in CPM models."""

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # sp_model is a C-extension object and cannot be pickled directly
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            # split off a trailing comma that SentencePiece glued onto a number
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
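# A small sketch (illustrative, not from the original file) of the character
# trick above: spaces and newlines are mapped to "\u2582"/"\u2583" before
# SentencePiece sees the text, then mapped back in _decode, so whitespace in
# Chinese text survives tokenization.
#
#     translator = str.maketrans(" \n", "\u2582\u2583")
#     encoded = "hello world\n".translate(translator)   # 'hello▂world▃'
#     restored = encoded.replace("\u2582", " ").replace("\u2583", "\n")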
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> float:
'''simple docstring'''
lowercase_ = x
lowercase_ = y
for step in range(__lowerCAmelCase ): # noqa: B007
lowercase_ = a * a - b * b + x
lowercase_ = 2 * a * b + y
lowercase_ = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> tuple:
'''simple docstring'''
if distance == 1:
return (0, 0, 0)
else:
return (2_55, 2_55, 2_55)
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> tuple:
'''simple docstring'''
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 2_55 ) for i in colorsys.hsv_to_rgb(__lowerCAmelCase , 1 , 1 ) )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase = 8_00 , __lowerCAmelCase = 6_00 , __lowerCAmelCase = -0.6 , __lowerCAmelCase = 0 , __lowerCAmelCase = 3.2 , __lowerCAmelCase = 50 , __lowerCAmelCase = True , ) -> Image.Image:
'''simple docstring'''
lowercase_ = Image.new("""RGB""" , (image_width, image_height) )
lowercase_ = img.load()
# loop through the image-coordinates
for image_x in range(__lowerCAmelCase ):
for image_y in range(__lowerCAmelCase ):
# determine the figure-coordinates based on the image-coordinates
lowercase_ = figure_width / image_width * image_height
lowercase_ = figure_center_x + (image_x / image_width - 0.5) * figure_width
lowercase_ = figure_center_y + (image_y / image_height - 0.5) * figure_height
lowercase_ = get_distance(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
lowercase_ = get_color_coded_rgb(__lowerCAmelCase )
else:
lowercase_ = get_black_and_white_rgb(__lowerCAmelCase )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
UpperCAmelCase : Any = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> None:
'''simple docstring'''
lowercase_ , lowercase_ = analyze_text(__lowerCAmelCase )
lowercase_ = list(""" """ + ascii_lowercase )
# what is our total sum of probabilities.
lowercase_ = sum(single_char_strings.values() )
# one length string
lowercase_ = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
lowercase_ = single_char_strings[ch]
lowercase_ = my_str / all_sum
my_fir_sum += prob * math.loga(__lowerCAmelCase ) # entropy formula.
# print entropy
print(F'''{round(-1 * my_fir_sum ):.1f}''' )
# two len string
lowercase_ = sum(two_char_strings.values() )
lowercase_ = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
for cha in my_alphas:
lowercase_ = cha + cha
if sequence in two_char_strings:
lowercase_ = two_char_strings[sequence]
lowercase_ = int(__lowerCAmelCase ) / all_sum
my_sec_sum += prob * math.loga(__lowerCAmelCase )
# print second entropy
print(F'''{round(-1 * my_sec_sum ):.1f}''' )
# print the difference between them
print(F'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> tuple[dict, dict]:
'''simple docstring'''
lowercase_ = Counter() # type: ignore
lowercase_ = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(__lowerCAmelCase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def _SCREAMING_SNAKE_CASE () -> str:
'''simple docstring'''
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
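# Usage sketch (not in the original file): calculate_prob prints the entropy
# H1 of the single-character distribution, the entropy H2 of the character-pair
# distribution, and H2 - H1, each rounded to one decimal place.
#
#     calculate_prob("the quick brown fox jumps over the lazy dog ")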
import torch

from diffusers import DDPMScheduler

from .test_schedulers import SchedulerCommonTest


class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
import torch
import torch.nn as nn

from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel

from ...utils import logging


logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    # L2-normalize both sets of embeddings, then take pairwise dot products,
    # which yields the matrix of cosine similarities
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
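# A quick sanity check (not in the original file): cosine_distance of
# L2-normalized embeddings is the pairwise cosine similarity, so each row
# compared against itself scores 1.0.
#
#     x = torch.randn(2, 8)
#     sims = cosine_distance(x, x)  # shape (2, 2)
#     assert torch.allclose(sims.diagonal(), torch.ones(2), atol=1e-6)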
"""simple docstring"""
import datasets
from .evaluate import evaluate
UpperCAmelCase = '''\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
'''
UpperCAmelCase = '''
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
'''
UpperCAmelCase = '''
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the SQuAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]
>>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]
>>> squad_metric = datasets.load_metric(\"squad\")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
def __snake_case ( self : str ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {'''id''': datasets.Value('''string''' ), '''prediction_text''': datasets.Value('''string''' )},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ) , codebase_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , reference_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , )
def __snake_case ( self : Any , snake_case__ : Dict , snake_case__ : Optional[Any] ):
'''simple docstring'''
lowercase :Optional[Any] = {prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
lowercase :List[str] = [
{
'''paragraphs''': [
{
'''qas''': [
{
'''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
'''id''': ref['''id'''],
}
for ref in references
]
}
]
}
]
lowercase :Optional[Any] = evaluate(dataset=snake_case__ , predictions=snake_case__ )
return score
"""simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class __magic_name__ ( __UpperCAmelCase , unittest.TestCase ):
__A : Tuple = PriorTransformer
__A : Any = "hidden_states"
@property
def __snake_case ( self : str ):
'''simple docstring'''
lowercase :Union[str, Any] = 4
lowercase :List[Any] = 8
lowercase :Any = 7
lowercase :Optional[Any] = floats_tensor((batch_size, embedding_dim) ).to(snake_case__ )
lowercase :List[Any] = floats_tensor((batch_size, embedding_dim) ).to(snake_case__ )
lowercase :Union[str, Any] = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(snake_case__ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def __snake_case ( self : Dict , snake_case__ : Optional[Any]=0 ):
'''simple docstring'''
torch.manual_seed(snake_case__ )
lowercase :Union[str, Any] = 4
lowercase :Optional[int] = 8
lowercase :str = 7
lowercase :Dict = torch.randn((batch_size, embedding_dim) ).to(snake_case__ )
lowercase :Union[str, Any] = torch.randn((batch_size, embedding_dim) ).to(snake_case__ )
lowercase :Tuple = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(snake_case__ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
return (4, 8)
@property
def __snake_case ( self : Dict ):
'''simple docstring'''
return (4, 8)
def __snake_case ( self : Optional[Any] ):
'''simple docstring'''
lowercase :Union[str, Any] = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 4,
'''num_layers''': 2,
'''embedding_dim''': 8,
'''num_embeddings''': 7,
'''additional_embeddings''': 4,
}
lowercase :List[str] = self.dummy_input
return init_dict, inputs_dict
def __snake_case ( self : List[str] ):
'''simple docstring'''
lowercase , lowercase :Optional[int] = PriorTransformer.from_pretrained(
'''hf-internal-testing/prior-dummy''' , output_loading_info=snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(snake_case__ )
lowercase :List[Any] = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
lowercase , lowercase :List[Any] = self.prepare_init_args_and_inputs_for_common()
lowercase :str = self.model_class(**snake_case__ )
lowercase :Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase :List[Any] = [*signature.parameters.keys()]
lowercase :Optional[Any] = ['''hidden_states''', '''timestep''']
self.assertListEqual(arg_names[:2] , snake_case__ )
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
lowercase :Optional[int] = PriorTransformer.from_pretrained('''hf-internal-testing/prior-dummy''' )
lowercase :List[Any] = model.to(snake_case__ )
if hasattr(snake_case__ , '''set_default_attn_processor''' ):
model.set_default_attn_processor()
lowercase :List[str] = self.get_dummy_seed_input()
with torch.no_grad():
lowercase :List[Any] = model(**snake_case__ )[0]
lowercase :Dict = output[0, :5].flatten().cpu()
print(snake_case__ )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
lowercase :str = torch.tensor([-1.34_36, -0.28_70, 0.75_38, 0.43_68, -0.02_39] )
self.assertTrue(torch_all_close(snake_case__ , snake_case__ , rtol=1e-2 ) )
@slow
class __magic_name__ ( unittest.TestCase ):
def __snake_case ( self : Tuple , snake_case__ : int=1 , snake_case__ : int=7_6_8 , snake_case__ : Optional[Any]=7_7 , snake_case__ : Union[str, Any]=0 ):
'''simple docstring'''
torch.manual_seed(snake_case__ )
lowercase :Any = batch_size
lowercase :Dict = embedding_dim
lowercase :int = num_embeddings
lowercase :int = torch.randn((batch_size, embedding_dim) ).to(snake_case__ )
lowercase :List[str] = torch.randn((batch_size, embedding_dim) ).to(snake_case__ )
lowercase :Optional[Any] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(snake_case__ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def __snake_case ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[1_3, [-0.58_61, 0.12_83, -0.09_31, 0.08_82, 0.44_76, 0.13_29, -0.04_98, 0.06_40]],
[3_7, [-0.49_13, 0.01_10, -0.04_83, 0.05_41, 0.49_54, -0.01_70, 0.03_54, 0.16_51]],
# fmt: on
] )
def __snake_case ( self : Dict , snake_case__ : Dict , snake_case__ : Union[str, Any] ):
'''simple docstring'''
lowercase :str = PriorTransformer.from_pretrained('''kandinsky-community/kandinsky-2-1-prior''' , subfolder='''prior''' )
model.to(snake_case__ )
lowercase :Union[str, Any] = self.get_dummy_seed_input(seed=snake_case__ )
with torch.no_grad():
lowercase :int = model(**snake_case__ )[0]
assert list(sample.shape ) == [1, 7_6_8]
lowercase :List[Any] = sample[0, :8].flatten().cpu()
print(snake_case__ )
lowercase :Union[str, Any] = torch.tensor(snake_case__ )
assert torch_all_close(snake_case__ , snake_case__ , atol=1e-3 )
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
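# Usage sketch (illustrative, not from the original file): like any
# PretrainedConfig subclass, the config can be built with defaults and
# selectively overridden.
#
#     config = Data2VecVisionConfig(image_size=384, drop_path_rate=0.2)
#     print(config.num_hidden_layers)  # 12 (the default)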
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 100 , ):
'''simple docstring'''
_lowerCAmelCase : str = x_start
_lowerCAmelCase : Optional[Any] = fnc(_lowerCamelCase )
_lowerCAmelCase : Dict = 0.0
for _ in range(_lowerCamelCase ):
# Approximates curve as a sequence of linear lines and sums their length
_lowerCAmelCase : Dict = (x_end - x_start) / steps + xa
_lowerCAmelCase : Tuple = fnc(_lowerCamelCase )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
_lowerCAmelCase : Optional[Any] = xa
_lowerCAmelCase : Union[str, Any] = fxa
return length
if __name__ == "__main__":
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return math.sin(10 * x )
print("""f(x) = sin(10 * x)""")
print("""The length of the curve from x = -10 to x = 10 is:""")
_lowerCAmelCase = 1_0
while i <= 1_0_0_0_0_0:
print(F'''With {i} steps: {line_length(f, -1_0, 1_0, i)}''')
i *= 1_0
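# A quick sanity check (not in the original file): for the straight line
# f(x) = x from 0 to 1, the arc length should approach sqrt(2).
#
#     approx = line_length(lambda x: x, 0, 1, 1000)
#     assert abs(approx - math.sqrt(2)) < 1e-9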
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def __lowerCAmelCase ( __magic_name__ ):
_lowercase: int = fname.split(os.path.sep )[-1]
return re.search(R"^(.*)_\d+\.jpg$" , __magic_name__ ).groups()[0]
class A ( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self : Any , _UpperCamelCase : str , _UpperCamelCase : Any=None , _UpperCamelCase : List[str]=None):
_lowercase: Union[str, Any] = file_names
_lowercase: Tuple = image_transform
_lowercase: List[str] = label_to_id
def __len__( self : Any):
return len(self.file_names)
def __getitem__( self : List[Any] , _UpperCamelCase : Optional[Any]):
_lowercase: int = self.file_names[idx]
_lowercase: List[str] = PIL.Image.open(_UpperCamelCase)
_lowercase: Any = raw_image.convert("RGB")
if self.image_transform is not None:
_lowercase: List[str] = self.image_transform(_UpperCamelCase)
_lowercase: List[str] = extract_label(_UpperCamelCase)
if self.label_to_id is not None:
_lowercase: int = self.label_to_id[label]
return {"image": image, "label": label}
def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)

    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None

    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]

    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}

    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )

    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset(
        [file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id
    )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True

    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)

    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)

    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]

        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)

    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)

        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)

    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). "
        "Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)


if __name__ == "__main__":
    main()
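

# Minimal launch sketch (assumes this script is saved as cv_example.py, that
# `accelerate config` has been run on the machine, and that --data_dir points at a
# folder of "<label>_<n>.jpg" images; all names here are illustrative):
#
#   accelerate launch cv_example.py --data_dir ./images --with_tracking --checkpointing_steps epoch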
import unittest

from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "[PAD]"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1012)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1012)

    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "[UNK]",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "[UNK]",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [35389, 6672, 49, 2]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_lowercase: Dict = {"input_ids": [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_lowercase,
            model_name="microsoft/xprophetnet-large-wiki100-cased",
            revision="1acad1643ddd54a44df6a1b797ada8373685d90e",
        )
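
    # Note on the id arithmetic in the tests above: XLMProphetNet reserves the first
    # vocabulary slots for fairseq-style special tokens, so every raw SentencePiece id
    # is shifted by `tokenizer.fairseq_offset`. A sketch of the round trip (not an
    # additional test, and assuming a plain, non-special piece):
    #   sp_id = tokenizer.sp_model.piece_to_id("▁This")
    #   assert tokenizer.convert_tokens_to_ids(["▁This"])[0] == sp_id + tokenizer.fairseq_offset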
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
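

# A minimal sketch of how such a command is typically implemented; the subcommand
# name, wiring, and print-out below are invented for illustration (in practice the
# `parser` argument is the subparsers action object on which `add_parser` is called):
#
# class EnvCommand(BaseTransformersCLICommand):
#     @staticmethod
#     def register_subcommand(parser: ArgumentParser):
#         sub = parser.add_parser("env")
#         sub.set_defaults(func=lambda args: EnvCommand())
#
#     def run(self):
#         print("environment info goes here")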
import math


def malus_law(initial_intensity: float, angle: float) -> float:
    # Handle negative values of the initial intensity.
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # Handle angle values outside the allowed range.
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)


if __name__ == "__main__":
    import doctest

    doctest.testmod(name="malus_law")
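

# Worked example (standard physics, not part of the original module): a polarizer at
# 60 degrees to the incident polarization transmits cos^2(60°) = 1/4 of the intensity.
# >>> round(malus_law(100.0, 60.0), 2)
# 25.0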
import gc
import random
import unittest

import numpy as np
import torch

from transformers import XLMRobertaTokenizer

from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
    RobertaSeriesConfig,
    RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt], generator=generator, num_inference_steps=2, output_type="np", image=init_image
        ).images

        assert image.shape == (1, 32, 32, 3)

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}


class ErnieMTokenizer(PreTrainedTokenizer):
    """Constructs an ErnieM tokenizer, backed by SentencePiece."""

    model_input_names: List[str] = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES

    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            vocab_file=vocab_file,
            encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}

    def get_offset_mapping(self, text):
        if text is None:
            return None

        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []

        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0

        if self.do_lower_case:
            text = text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)

    def clean_text(self, text):
        return "".join(self.SP_CHAR_MAPPING.get(c, c) for c in text)

    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]

        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]

        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)

    def is_ch_char(self, char):
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False

    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1

        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)

        return (vocab_file,)
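

# Minimal usage sketch (the file names follow RESOURCE_FILES_NAMES and are assumed to
# exist locally; this mirrors how `from_pretrained` wires the tokenizer up):
# tokenizer = ErnieMTokenizer(
#     sentencepiece_model_ckpt="sentencepiece.bpe.model", vocab_file="vocab.txt"
# )
# tokenizer("A sentence to encode.")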
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : str = logging.get_logger(__name__)
class a__ ( snake_case__ ):
__lowerCAmelCase = """timm_backbone"""
def __init__( self , _a=None , _a=3 , _a=True , _a=True , _a=None , **_a , ):
super().__init__(**__snake_case )
lowercase : Optional[Any] = backbone
lowercase : Optional[int] = num_channels
lowercase : Optional[Any] = features_only
lowercase : str = use_pretrained_backbone
lowercase : Any = True
lowercase : List[Any] = out_indices if out_indices is not None else (-1,)
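

# Minimal usage sketch (the architecture string below is illustrative; any valid
# timm model name works):
# config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))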
def solution(n: int = 1000) -> int:
    # For each a >= 3, 2 * a * ((a - 1) // 2) is the maximum residue of
    # (a - 1)**k + (a + 1)**k modulo a**2 (Project Euler problem 120).
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
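

# Cross-check of the closed form (a standalone sketch, not part of the solution):
# expanding (a ± 1)**k binomially mod a**2 gives a residue of 2 for even k and
# 2*k*a mod a**2 for odd k, so brute-forcing one full cycle of k matches the formula:
#
# for a in range(3, 50):
#     brute = max(((a - 1) ** k + (a + 1) ** k) % a**2 for k in range(1, 2 * a + 1))
#     assert brute == 2 * a * ((a - 1) // 2)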
import copy
import inspect
import unittest

import numpy as np
from huggingface_hub import hf_hub_download

from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
        TimesformerForVideoClassification,
        TimesformerModel,
    )
    from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from transformers import VideoMAEImageProcessor


class TimesformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
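

# Sanity check of the sequence length used above (pure arithmetic): with
# image_size=10 and patch_size=2 there are (10 // 2) ** 2 = 25 patches per frame,
# so with num_frames=2 the token count is 2 * 25 + 1 (CLS) = 51.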
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
# We will verify our results on a video of eating spaghetti
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)


@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
import unittest

from .lib import (
    Matrix,
    Vector,
    axpy,
    square_zero_matrix,
    unit_basis_vector,
    zero_vector,
)


class Test(unittest.TestCase):
    def test_component(self) -> None:
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self) -> None:
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self) -> None:
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self) -> None:
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self) -> None:
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self) -> None:
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self) -> None:
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self) -> None:
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self) -> None:
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test__mul__matrix(self) -> None:
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertAlmostEqual(7, a.component(2, 1), delta=0.01)

    def test__add__matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test__sub__matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self) -> None:
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n", str(square_zero_matrix(5))
        )


if __name__ == "__main__":
    unittest.main()
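

# Cross-check of test_determinant by hand (first-row cofactor expansion, using the
# cofactors asserted in test_cofactor): det = 1*(-3) + 2*14 + 3*(-10) = -5.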
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class a_ ( __UpperCamelCase ):
UpperCamelCase_ : Union[str, Any] = ["image_processor", "tokenizer"]
UpperCamelCase_ : List[str] = "BlipImageProcessor"
UpperCamelCase_ : int = "AutoTokenizer"
def __init__( self : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] ):
super().__init__(snake_case__ , snake_case__ )
# add QFormer tokenizer
lowerCAmelCase__ = qformer_tokenizer
def __call__( self : Optional[int] , snake_case__ : ImageInput = None , snake_case__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , snake_case__ : bool = True , snake_case__ : Union[bool, str, PaddingStrategy] = False , snake_case__ : Union[bool, str, TruncationStrategy] = None , snake_case__ : Optional[int] = None , snake_case__ : int = 0 , snake_case__ : Optional[int] = None , snake_case__ : Optional[bool] = None , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = True , snake_case__ : Optional[Union[str, TensorType]] = None , **snake_case__ : Optional[Any] , ):
if images is None and text is None:
raise ValueError("""You have to specify at least images or text.""" )
lowerCAmelCase__ = BatchFeature()
if text is not None:
lowerCAmelCase__ = self.tokenizer(
text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_token_type_ids=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , )
encoding.update(snake_case__ )
lowerCAmelCase__ = self.qformer_tokenizer(
text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_token_type_ids=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , )
lowerCAmelCase__ = qformer_text_encoding.pop("""input_ids""" )
lowerCAmelCase__ = qformer_text_encoding.pop("""attention_mask""" )
if images is not None:
lowerCAmelCase__ = self.image_processor(snake_case__ , return_tensors=snake_case__ )
encoding.update(snake_case__ )
return encoding
def _SCREAMING_SNAKE_CASE ( self : str , *snake_case__ : Tuple , **snake_case__ : Dict ):
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Dict , *snake_case__ : Optional[int] , **snake_case__ : Any ):
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def _SCREAMING_SNAKE_CASE ( self : Any ):
lowerCAmelCase__ = self.tokenizer.model_input_names
lowerCAmelCase__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : Optional[Any] , **snake_case__ : Dict ):
if os.path.isfile(snake_case__ ):
raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
lowerCAmelCase__ = os.path.join(snake_case__ , """qformer_tokenizer""" )
self.qformer_tokenizer.save_pretrained(snake_case__ )
return super().save_pretrained(snake_case__ , **snake_case__ )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Tuple , snake_case__ : Dict , **snake_case__ : Any ):
lowerCAmelCase__ = AutoTokenizer.from_pretrained(snake_case__ , subfolder="""qformer_tokenizer""" )
lowerCAmelCase__ = cls._get_arguments_from_pretrained(snake_case__ , **snake_case__ )
args.append(snake_case__ )
return cls(*snake_case__ )
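

# Hedged usage sketch (the checkpoint id is one published InstructBLIP repo; running
# this requires downloading the model files, and `image` is a PIL image you supply):
# processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
# inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")
# # -> keys include "pixel_values", "input_ids", "attention_mask",
# #    "qformer_input_ids", "qformer_attention_mask"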
"""simple docstring"""
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Check the sub-string divisibility property of Project Euler problem 43."""
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
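# Worked example (from the problem statement): 1406357289 has the property, since
# d2d3d4 = 406 is divisible by 2, d3d4d5 = 063 by 3, d4d5d6 = 635 by 5,
# d5d6d7 = 357 by 7, d6d7d8 = 572 by 11, d7d8d9 = 728 by 13 and d8d9d10 = 289 by 17:
#     >>> is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))
#     True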
def solution(n: int = 10) -> int:
    """Sum all 0 to 9 pandigital numbers with the sub-string divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(F"{solution() = }")
| 644
| 1
|
'''simple docstring'''
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''',
base=1_6,
),
'''generator''': 2,
},
# 2048-bit
1_4: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''',
base=1_6,
),
'''generator''': 2,
},
# 3072-bit
1_5: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''',
base=1_6,
),
'''generator''': 2,
},
# 4096-bit
1_6: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'''
+ '''FFFFFFFFFFFFFFFF''',
base=1_6,
),
'''generator''': 2,
},
# 6144-bit
1_7: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'''
+ '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'''
+ '''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'''
+ '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'''
+ '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'''
+ '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'''
+ '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'''
+ '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'''
+ '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'''
+ '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'''
+ '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'''
+ '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'''
+ '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'''
+ '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'''
+ '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'''
+ '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'''
+ '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'''
+ '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'''
+ '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'''
+ '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'''
+ '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'''
+ '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'''
+ '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'''
+ '''6DCC4024FFFFFFFFFFFFFFFF''',
base=1_6,
),
'''generator''': 2,
},
# 8192-bit
1_8: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'''
+ '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'''
+ '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'''
+ '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'''
+ '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'''
+ '''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'''
+ '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'''
+ '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'''
+ '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'''
+ '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'''
+ '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'''
+ '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'''
+ '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'''
+ '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'''
+ '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'''
+ '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'''
+ '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'''
+ '''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'''
+ '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'''
+ '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'''
+ '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''',
base=1_6,
),
'''generator''': 2,
},
}
class DiffieHellman:
    """Diffie-Hellman key exchange over the MODP groups defined above."""

    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]
        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key <= prime - 2
            and pow(remote_public_key, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
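# A minimal sanity-check sketch for the class above: two parties exchange public
# keys and must derive the same shared secret.
def _example_key_exchange() -> None:
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    shared_a = alice.generate_shared_key(bob.generate_public_key())
    shared_b = bob.generate_shared_key(alice.generate_public_key())
    assert shared_a == shared_b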
if __name__ == "__main__":
import doctest
doctest.testmod()
| 39
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", projection_dim: int = 0, **kwargs) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
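# A minimal usage sketch: a non-zero projection_dim adds a final projection on top
# of the encoder's pooled output (0 keeps the raw hidden size), e.g.
#     config = DPRConfig(projection_dim=128)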
| 39
| 1
|
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    """Helper that builds SqueezeBert configs and inputs for the model tests."""
def __init__( self :List[str], snake_case :List[str], snake_case :int=13, snake_case :Optional[Any]=7, snake_case :List[str]=True, snake_case :str=True, snake_case :Optional[Any]=False, snake_case :Tuple=True, snake_case :str=99, snake_case :List[str]=32, snake_case :Any=5, snake_case :str=4, snake_case :List[str]=64, snake_case :Dict="gelu", snake_case :Optional[int]=0.1, snake_case :Union[str, Any]=0.1, snake_case :List[str]=512, snake_case :Tuple=16, snake_case :Optional[Any]=2, snake_case :Optional[int]=0.0_2, snake_case :Dict=3, snake_case :Optional[int]=4, snake_case :Any=None, snake_case :int=2, snake_case :Any=2, snake_case :str=2, snake_case :Optional[int]=2, snake_case :List[str]=4, snake_case :Optional[Any]=1, ):
"""simple docstring"""
_lowercase =parent
_lowercase =batch_size
_lowercase =seq_length
_lowercase =is_training
_lowercase =use_input_mask
_lowercase =use_token_type_ids
_lowercase =use_labels
_lowercase =vocab_size
_lowercase =hidden_size
_lowercase =num_hidden_layers
_lowercase =num_attention_heads
_lowercase =intermediate_size
_lowercase =hidden_act
_lowercase =hidden_dropout_prob
_lowercase =attention_probs_dropout_prob
_lowercase =max_position_embeddings
_lowercase =type_vocab_size
_lowercase =type_sequence_label_size
_lowercase =initializer_range
_lowercase =num_labels
_lowercase =num_choices
_lowercase =scope
_lowercase =q_groups
_lowercase =k_groups
_lowercase =v_groups
_lowercase =post_attention_groups
_lowercase =intermediate_groups
_lowercase =output_groups
def UpperCamelCase__ ( self :List[Any]):
"""simple docstring"""
_lowercase =ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowercase =None
if self.use_input_mask:
_lowercase =random_attention_mask([self.batch_size, self.seq_length])
_lowercase =None
_lowercase =None
_lowercase =None
if self.use_labels:
_lowercase =ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowercase =ids_tensor([self.batch_size, self.seq_length], self.num_labels)
_lowercase =ids_tensor([self.batch_size], self.num_choices)
_lowercase =self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self :Dict):
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size, vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, attention_probs_dropout_prob=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, q_groups=self.q_groups, k_groups=self.k_groups, v_groups=self.v_groups, post_attention_groups=self.post_attention_groups, intermediate_groups=self.intermediate_groups, output_groups=self.output_groups, )
def UpperCamelCase__ ( self :Union[str, Any], snake_case :Optional[Any], snake_case :int, snake_case :int, snake_case :Optional[Any], snake_case :Optional[int], snake_case :Optional[Any]):
"""simple docstring"""
_lowercase =SqueezeBertModel(config=lowercase_)
model.to(lowercase_)
model.eval()
_lowercase =model(lowercase_, lowercase_)
_lowercase =model(lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def UpperCamelCase__ ( self :Union[str, Any], snake_case :Tuple, snake_case :Dict, snake_case :Union[str, Any], snake_case :Optional[Any], snake_case :Union[str, Any], snake_case :Dict):
"""simple docstring"""
_lowercase =SqueezeBertForMaskedLM(config=lowercase_)
model.to(lowercase_)
model.eval()
_lowercase =model(lowercase_, attention_mask=lowercase_, labels=lowercase_)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def UpperCamelCase__ ( self :Optional[int], snake_case :int, snake_case :Any, snake_case :List[str], snake_case :Dict, snake_case :Tuple, snake_case :List[Any]):
"""simple docstring"""
_lowercase =SqueezeBertForQuestionAnswering(config=lowercase_)
model.to(lowercase_)
model.eval()
_lowercase =model(
lowercase_, attention_mask=lowercase_, start_positions=lowercase_, end_positions=lowercase_)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def UpperCamelCase__ ( self :Tuple, snake_case :Dict, snake_case :List[Any], snake_case :List[Any], snake_case :int, snake_case :List[str], snake_case :List[Any]):
"""simple docstring"""
_lowercase =self.num_labels
_lowercase =SqueezeBertForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
_lowercase =model(lowercase_, attention_mask=lowercase_, labels=lowercase_)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def UpperCamelCase__ ( self :Dict, snake_case :str, snake_case :str, snake_case :List[Any], snake_case :Optional[int], snake_case :Optional[int], snake_case :Optional[int]):
"""simple docstring"""
_lowercase =self.num_labels
_lowercase =SqueezeBertForTokenClassification(config=lowercase_)
model.to(lowercase_)
model.eval()
_lowercase =model(lowercase_, attention_mask=lowercase_, labels=lowercase_)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def UpperCamelCase__ ( self :Optional[int], snake_case :Tuple, snake_case :Optional[Any], snake_case :Tuple, snake_case :str, snake_case :List[Any], snake_case :List[str]):
"""simple docstring"""
_lowercase =self.num_choices
_lowercase =SqueezeBertForMultipleChoice(config=lowercase_)
model.to(lowercase_)
model.eval()
_lowercase =input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
_lowercase =input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
_lowercase =model(
lowercase_, attention_mask=lowercase_, labels=lowercase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        """Prepare a config and a minimal inputs dict for the common tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase : str =(
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
__lowerCAmelCase : List[Any] =(
{
"""feature-extraction""": SqueezeBertModel,
"""fill-mask""": SqueezeBertForMaskedLM,
"""question-answering""": SqueezeBertForQuestionAnswering,
"""text-classification""": SqueezeBertForSequenceClassification,
"""token-classification""": SqueezeBertForTokenClassification,
"""zero-shot""": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCAmelCase : Tuple =False
__lowerCAmelCase : Dict =True
__lowerCAmelCase : List[str] =False
    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)
def UpperCamelCase__ ( self :Tuple):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self :List[str]):
"""simple docstring"""
_lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*lowercase_)
def UpperCamelCase__ ( self :Tuple):
"""simple docstring"""
_lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*lowercase_)
def UpperCamelCase__ ( self :List[str]):
"""simple docstring"""
_lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*lowercase_)
def UpperCamelCase__ ( self :Optional[int]):
"""simple docstring"""
_lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*lowercase_)
def UpperCamelCase__ ( self :List[Any]):
"""simple docstring"""
_lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*lowercase_)
def UpperCamelCase__ ( self :Dict):
"""simple docstring"""
_lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*lowercase_)
@slow
def UpperCamelCase__ ( self :Dict):
"""simple docstring"""
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase =SqueezeBertModel.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
@require_sentencepiece
@require_tokenizers
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCamelCase__ ( self :str):
"""simple docstring"""
_lowercase =SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli')
_lowercase =torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]])
_lowercase =model(lowercase_)[0]
_lowercase =torch.Size((1, 3))
self.assertEqual(output.shape, lowercase_)
_lowercase =torch.tensor([[0.6_4_0_1, -0.0_3_4_9, -0.6_0_4_1]])
self.assertTrue(torch.allclose(lowercase_, lowercase_, atol=1e-4))
| 181
|
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
    tokenizer = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=10_00,
tgt_vocab_size=10_00,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(f'num of params {tiny_model.num_parameters()}')
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'Generated {mname_tiny}')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 123
| 0
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = 42
@flax_register_to_config
class lowerCAmelCase_ ( nn.Module, __lowercase, __lowercase ):
UpperCAmelCase = 32
UpperCAmelCase = 4
UpperCAmelCase = 4
UpperCAmelCase = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
UpperCAmelCase = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
UpperCAmelCase = False
UpperCAmelCase = (320, 640, 1280, 1280)
UpperCAmelCase = 2
UpperCAmelCase = 8
UpperCAmelCase = None
UpperCAmelCase = 1280
UpperCAmelCase = 0.0
UpperCAmelCase = False
UpperCAmelCase = jnp.floataa
UpperCAmelCase = True
UpperCAmelCase = 0
UpperCAmelCase = False
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}
        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = self.block_out_channels
_UpperCamelCase = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
_UpperCamelCase = self.num_attention_heads or self.attention_head_dim
# input
_UpperCamelCase = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
_UpperCamelCase = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
_UpperCamelCase = FlaxTimestepEmbedding(_A , dtype=self.dtype )
_UpperCamelCase = self.only_cross_attention
if isinstance(_A , _A ):
_UpperCamelCase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_A , _A ):
_UpperCamelCase = (num_attention_heads,) * len(self.down_block_types )
# down
_UpperCamelCase = []
_UpperCamelCase = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
_UpperCamelCase = output_channel
_UpperCamelCase = block_out_channels[i]
_UpperCamelCase = i == len(_A ) - 1
if down_block_type == "CrossAttnDownBlock2D":
_UpperCamelCase = FlaxCrossAttnDownBlockaD(
in_channels=_A , out_channels=_A , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
_UpperCamelCase = FlaxDownBlockaD(
in_channels=_A , out_channels=_A , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_A )
_UpperCamelCase = down_blocks
# mid
_UpperCamelCase = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
_UpperCamelCase = []
_UpperCamelCase = list(reversed(_A ) )
_UpperCamelCase = list(reversed(_A ) )
_UpperCamelCase = list(reversed(_A ) )
_UpperCamelCase = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
_UpperCamelCase = output_channel
_UpperCamelCase = reversed_block_out_channels[i]
_UpperCamelCase = reversed_block_out_channels[min(i + 1 , len(_A ) - 1 )]
_UpperCamelCase = i == len(_A ) - 1
if up_block_type == "CrossAttnUpBlock2D":
_UpperCamelCase = FlaxCrossAttnUpBlockaD(
in_channels=_A , out_channels=_A , prev_output_channel=_A , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
_UpperCamelCase = FlaxUpBlockaD(
in_channels=_A , out_channels=_A , prev_output_channel=_A , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(_A )
_UpperCamelCase = output_channel
_UpperCamelCase = up_blocks
# out
_UpperCamelCase = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
_UpperCamelCase = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : List[Any] , _A : Dict , _A : Optional[int] , _A : List[str] , _A : str=None , _A : List[str]=None , _A : bool = True , _A : bool = False , ):
# 1. time
if not isinstance(_A , jnp.ndarray ):
_UpperCamelCase = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(_A , jnp.ndarray ) and len(timesteps.shape ) == 0:
_UpperCamelCase = timesteps.astype(dtype=jnp.floataa )
_UpperCamelCase = jnp.expand_dims(_A , 0 )
_UpperCamelCase = self.time_proj(_A )
_UpperCamelCase = self.time_embedding(_A )
# 2. pre-process
_UpperCamelCase = jnp.transpose(_A , (0, 2, 3, 1) )
_UpperCamelCase = self.conv_in(_A )
# 3. down
_UpperCamelCase = (sample,)
for down_block in self.down_blocks:
if isinstance(_A , _A ):
_UpperCamelCase , _UpperCamelCase = down_block(_A , _A , _A , deterministic=not train )
else:
_UpperCamelCase , _UpperCamelCase = down_block(_A , _A , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
_UpperCamelCase = ()
for down_block_res_sample, down_block_additional_residual in zip(
_A , _A ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
_UpperCamelCase = new_down_block_res_samples
# 4. mid
_UpperCamelCase = self.mid_block(_A , _A , _A , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
_UpperCamelCase = down_block_res_samples[-(self.layers_per_block + 1) :]
_UpperCamelCase = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(_A , _A ):
_UpperCamelCase = up_block(
_A , temb=_A , encoder_hidden_states=_A , res_hidden_states_tuple=_A , deterministic=not train , )
else:
_UpperCamelCase = up_block(_A , temb=_A , res_hidden_states_tuple=_A , deterministic=not train )
# 6. post-process
_UpperCamelCase = self.conv_norm_out(_A )
_UpperCamelCase = nn.silu(_A )
_UpperCamelCase = self.conv_out(_A )
_UpperCamelCase = jnp.transpose(_A , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=_A )
| 71
|
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class lowerCAmelCase_ :
def __init__( self : Optional[Any] , _A : Optional[Any] , _A : List[str]=13 , _A : Union[str, Any]=7 , _A : int=True , _A : Optional[int]=True , _A : Optional[int]=True , _A : Union[str, Any]=True , _A : Optional[int]=99 , _A : Union[str, Any]=32 , _A : Dict=2 , _A : List[Any]=4 , _A : Optional[Any]=37 , _A : int="gelu" , _A : Optional[int]=0.1 , _A : str=0.1 , _A : List[str]=512 , _A : Optional[Any]=16 , _A : Optional[Any]=2 , _A : Optional[int]=0.02 , _A : str=False , _A : int=True , _A : Any="None" , _A : Dict=3 , _A : List[Any]=4 , _A : Optional[Any]=None , ):
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = relative_attention
_UpperCamelCase = position_biased_input
_UpperCamelCase = pos_att_type
_UpperCamelCase = scope
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=_A , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self : Dict , _A : Tuple , _A : Tuple , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : int , _A : Optional[Any] ):
_UpperCamelCase = TFDebertaVaModel(config=_A )
_UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCamelCase = [input_ids, input_mask]
_UpperCamelCase = model(_A )
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : Dict , _A : Optional[int] , _A : Any , _A : Dict , _A : Union[str, Any] , _A : Union[str, Any] , _A : List[Any] , _A : List[str] ):
_UpperCamelCase = TFDebertaVaForMaskedLM(config=_A )
_UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : Dict , _A : Dict , _A : List[str] , _A : List[Any] , _A : List[Any] , _A : Optional[Any] , _A : Tuple , _A : int ):
_UpperCamelCase = self.num_labels
_UpperCamelCase = TFDebertaVaForSequenceClassification(config=_A )
_UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : Tuple , _A : Dict , _A : Optional[int] , _A : Any , _A : List[Any] , _A : Dict , _A : Union[str, Any] , _A : List[str] ):
_UpperCamelCase = self.num_labels
_UpperCamelCase = TFDebertaVaForTokenClassification(config=_A )
_UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self : Dict , _A : Optional[Any] , _A : Optional[int] , _A : Any , _A : List[str] , _A : str , _A : Optional[int] , _A : str ):
_UpperCamelCase = TFDebertaVaForQuestionAnswering(config=_A )
_UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( __lowercase, __lowercase, unittest.TestCase ):
UpperCAmelCase = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCAmelCase = (
{
"feature-extraction": TFDebertaVaModel,
"fill-mask": TFDebertaVaForMaskedLM,
"question-answering": TFDebertaVaForQuestionAnswering,
"text-classification": TFDebertaVaForSequenceClassification,
"token-classification": TFDebertaVaForTokenClassification,
"zero-shot": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase = False
UpperCAmelCase = False
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = TFDebertaVaModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=_A , hidden_size=37 )
def UpperCamelCase_ ( self : Any ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_A )
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
@slow
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' )
self.assertIsNotNone(_A )
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
@unittest.skip(reason='''Model not available yet''' )
def UpperCamelCase_ ( self : List[Any] ):
pass
@slow
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' )
_UpperCamelCase = tf.constant([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
_UpperCamelCase = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_UpperCamelCase = model(_A , attention_mask=_A )[0]
_UpperCamelCase = tf.constant(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , _A , atol=1e-4 )
| 71
| 1
|
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]
def next_number(number: int) -> int:
    """Return the sum of the squares of the digits of ``number``."""
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
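# Worked examples of the two possible chains (from the problem statement):
#   44 -> 32 -> 13 -> 10 -> 1 -> 1          (arrives at 1; next_number(44) == 4**2 + 4**2 == 32)
#   85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89   (arrives at 89)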
CHAINS: list[bool | None] = [None] * 10000000
CHAINS[0] = True  # the chain starting at 1 ends at 1
CHAINS[57] = False  # the chain starting at 58 ends at 89
def chain(number: int) -> bool:
    """Return True if the square-digit chain starting at ``number`` ends at 1."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution(number: int = 10000000) -> int:
    """Count how many starting numbers below ``number`` produce chains arriving at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution() = }')
| 15
|
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer
class Wav2Vec2Processor(ProcessorMixin):
    """Constructs a Wav2Vec2 processor which wraps a feature extractor and a CTC tokenizer into a single processor."""

    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)
    def __call__(self, *args, **kwargs):
        """Forwards `audio` to the feature extractor and `text` to the tokenizer."""
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def pad(self, *args, **kwargs):
        """Forwards `input_features` to the feature extractor's pad and `labels` to the tokenizer's pad."""
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
@contextmanager
    def as_target_processor(self):
        """Temporarily sets the tokenizer for processing the input, useful for encoding labels."""
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your audio inputs, or in a separate call.""" )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
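# A minimal usage sketch (an assumption: the standard Wav2Vec2 processor API, and
# "facebook/wav2vec2-base-960h" is a public checkpoint that needs network access):
#
#     import numpy as np
#     processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#     speech = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
#     inputs = processor(audio=speech, sampling_rate=16000, text="HELLO", return_tensors="pt")
#     # `inputs` contains `input_values` from the feature extractor and `labels`
#     # (the tokenized text) for CTC fine-tuning.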
| 379
| 0
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
| 163
|
'''simple docstring'''
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Applies a polynomial warmup on top of a given learning rate decay schedule."""

    def __init__(self, initial_learning_rate, decay_schedule_fn, warmup_steps, power=1.0, name=None):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(global_step_float < warmup_steps_float, lambda: warmup_learning_rate, lambda: self.decay_schedule_fn(step - self.warmup_steps), name=name)

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(init_lr: float, num_train_steps: int, num_warmup_steps: int, min_lr_ratio: float = 0.0, adam_beta1: float = 0.9, adam_beta2: float = 0.999, adam_epsilon: float = 1e-8, adam_clipnorm: Optional[float] = None, adam_global_clipnorm: Optional[float] = None, weight_decay_rate: float = 0.0, power: float = 1.0, include_in_weight_decay: Optional[List[str]] = None):
    """Creates an optimizer with a warmup phase followed by a polynomial decay schedule."""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr, decay_steps=num_train_steps - num_warmup_steps, end_learning_rate=init_lr * min_lr_ratio, power=power)
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr, decay_schedule_fn=lr_schedule, warmup_steps=num_warmup_steps)
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule, weight_decay_rate=weight_decay_rate, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"], include_in_weight_decay=include_in_weight_decay)
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm)
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
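# A minimal usage sketch of the factory above. With init_lr=1e-3, 100 warmup steps
# and power=1, the warmup learning rate at step 50 is 1e-3 * (50 / 100) = 5e-4;
# after step 100 the polynomial decay schedule takes over.
#
#     optimizer, lr_schedule = create_optimizer(init_lr=1e-3, num_train_steps=1000, num_warmup_steps=100, weight_decay_rate=0.01)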
class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay that is *not* applied through the gradients."""

    def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=False, weight_decay_rate=0.0, include_in_weight_decay=None, exclude_from_weight_decay=None, name="AdamWeightDecay", **kwargs):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with the WarmUp custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate")

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"], use_locking=self._use_locking)
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator(object):
    """Gradient accumulation utility: gradients are accumulated locally on each replica."""

    def __init__(self):
        """Initializes the accumulator."""
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")

        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)

        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
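# A minimal usage sketch (assumes eager TF2; model/optimizer/dataset/loss_fn are
# hypothetical placeholders, not defined in this module):
def _sketch_accumulate(model, optimizer, dataset, loss_fn, accumulation_steps=4):
    accumulator = GradientAccumulator()
    for step, (x, y) in enumerate(dataset):
        with tf.GradientTape() as tape:
            loss = loss_fn(y, model(x, training=True))
        accumulator(tape.gradient(loss, model.trainable_variables))
        if (step + 1) % accumulation_steps == 0:
            optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
            accumulator.reset()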
| 163
| 1
|
"""simple docstring"""
def __magic_name__ ( lowercase , lowercase ):
if digit_amount > 0:
return round(number - int(lowercase ) , lowercase )
return number - int(lowercase )
if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
| 409
|
from ..utils import DummyObject, requires_backends
class __magic_name__(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __magic_name__(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __magic_name__(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __magic_name__(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __magic_name__(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __magic_name__(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __magic_name__(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __magic_name__(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __magic_name__(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __magic_name__(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __magic_name__(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __magic_name__(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __magic_name__(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
| 348
| 0
|
"""simple docstring"""
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    """Change the contrast of a PIL image by the given level (roughly -255 to 255)."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        """Fundamental transformation applied to every pixel value."""
        return int(128 + factor * (c - 128))

    return img.point(contrast)
if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
| 261
|
"""simple docstring"""
def and_gate(input_1: int, input_2: int) -> int:
    """Calculate AND of the input values."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    """Tests the and_gate function."""
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 261
| 1
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )
        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)
        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
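# A small self-contained sketch (toy arrays, hypothetical label map) of the masking
# performed by align_predictions above: positions whose label id equals the
# CrossEntropyLoss ignore_index (-100) are dropped before seqeval metrics are computed.
def _sketch_align():
    label_map = {0: "O", 1: "B-PER"}
    preds = np.array([[0, 1, 0]])
    label_ids = np.array([[0, 1, -100]])  # last position is a padded sub-token
    kept = [
        (label_map[int(p)], label_map[int(l)])
        for p, l in zip(preds[0], label_ids[0])
        if l != nn.CrossEntropyLoss().ignore_index
    ]
    return kept  # [("O", "O"), ("B-PER", "B-PER")]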
| 50
|
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
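# A minimal forward-pass sketch (hypothetical sizes, not from the original file);
# the class inherits ModelMixin/ConfigMixin, so the constructor arguments are
# recorded in its config by @register_to_config:
def _sketch_encode():
    encoder = SpectrogramNotesEncoder(
        max_length=64,
        vocab_size=100,
        d_model=32,
        dropout_rate=0.1,
        num_layers=1,
        num_heads=2,
        d_kv=16,
        d_ff=64,
        feed_forward_proj="gated-gelu",
    )
    tokens = torch.randint(0, 100, (1, 64))
    mask = torch.ones(1, 64, dtype=torch.long)
    hidden, out_mask = encoder(tokens, mask)
    return hidden.shape  # expected: torch.Size([1, 64, 32])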
| 554
| 0
|
'''simple docstring'''
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 706
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 322
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
| 690
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
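# A minimal usage sketch (hypothetical file path; requires Pillow and numpy):
def _sketch_preprocess(path="image.png"):
    image = PIL.Image.open(path)
    processor = CLIPImageProcessor()
    batch = processor(image, return_tensors="np")
    return batch["pixel_values"].shape  # (1, 3, 224, 224) with the default config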
| 690
| 1
|
'''simple docstring'''
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
if __name__ == "__main__":
print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
print(F"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
| 710
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}
class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
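# A short sketch (not from the original file) showing that `num_layers` is derived
# from the length of `depths` rather than passed explicitly:
def _sketch_config():
    config = Swin2SRConfig(depths=[6, 6], num_heads=[6, 6])
    return config.num_layers  # 2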
| 301
| 0
|
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
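# A tiny demonstration (dummy tensor, not part of the conversion script) of the
# K, V, Q ordering that torch.split relies on above:
def _demo_qkv_split():
    qkv = torch.arange(9 * 4, dtype=torch.float32).reshape(9, 4)  # fused weight, depth = 9
    k, v, q = torch.split(qkv, 9 // 3, dim=0)  # metaseq stores K first, then V, then Q
    assert q.shape == (3, 4)
    return q, k, v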
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fairseq_path""",
type=str,
help=(
"""path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
""" https://huggingface.co/models?other=opt_metasq"""
),
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 237
|
"""simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
| 237
| 1
|
"""simple docstring"""
def gray_code(bit_count: int) -> list:
    """Produce the Gray code sequence for the given number of bits as integers."""
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    #
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Generate the Gray code sequence as binary strings, recursively."""
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1<< n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence
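# A quick worked example (not in the original file): for two bits the reflected
# sequence is ["00", "01", "11", "10"], i.e. [0, 1, 3, 2] as integers; adjacent
# codes differ in exactly one bit.
def _sketch_gray_code():
    assert gray_code_sequence_string(2) == ["00", "01", "11", "10"]
    assert gray_code(2) == [0, 1, 3, 2]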
if __name__ == "__main__":
import doctest
doctest.testmod()
| 210
|
"""simple docstring"""
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"


def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    """
    Copy/paste/tweak roberta's weights to our BERT structure.
    """
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 210
| 1
|
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Find the set of distinct prime factors of an integer."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of unique prime factors for a given value."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check that ALL elements of an iterable are equal (or the iterable is empty)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first run of n consecutive integers with n distinct prime factors each."""
    base = 2

    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Return the first member of the first run of n consecutive integers with n distinct prime factors."""
    results = run(n)
    return results[0] if len(results) else None
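# A small check (not part of the original solution): for n=2 the first pair of
# consecutive integers with exactly two distinct prime factors each is (14, 15),
# since 14 = 2 * 7 and 15 = 3 * 5; for n=3 the answer starts at 644 (644, 645, 646).
def _sketch_run():
    assert run(2) == [14, 15]
    assert solution(3) == 644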
if __name__ == "__main__":
print(solution())
| 518
|
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    """
    Output class for Stable Diffusion pipelines: generated images plus optional NSFW flags.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 340
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 501
|
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
    """
    Test case for verifying the `accelerate launch` CLI works correctly.
    """

    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])

    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"

    test_config_path = Path("tests/test_configs")

    @classmethod
    def setUpClass(cls):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())
class TpuConfigTester(unittest.TestCase):
    """
    Test case for verifying the `accelerate tpu-config` CLI passes the right command to `gcloud`.
    """

    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"

    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                'echo "Hello World"',
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all',
            output,
        )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )
| 501
| 1
|
def UpperCamelCase__( UpperCamelCase__ : str , UpperCamelCase__ : str ) -> tuple[int, str]:
assert x is not None
assert y is not None
A__ = len(UpperCamelCase__ )
A__ = len(UpperCamelCase__ )
# declaring the array for storing the dp values
A__ = [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741
for i in range(1 , m + 1 ):
for j in range(1 , n + 1 ):
A__ = 1 if x[i - 1] == y[j - 1] else 0
A__ = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
A__ = ''''''
A__ , A__ = m, n
while i > 0 and j > 0:
A__ = 1 if x[i - 1] == y[j - 1] else 0
if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
A__ = x[i - 1] + seq
i -= 1
j -= 1
elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
return l[m][n], seq
if __name__ == "__main__":
a__: Tuple = 'AGGTAB'
a__: Tuple = 'GXTXAYB'
a__: List[Any] = 4
a__: Optional[Any] = 'GTAB'
a__ , a__: List[str] = longest_common_subsequence(a, b)
print('len =', ln, ', sub-sequence =', subseq)
import doctest
doctest.testmod()
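# One more worked example, assuming the DP function above is exposed as
# ``longest_common_subsequence`` (the name the demo block already uses):
# for x = "ABCBDAB" and y = "BDCABA" the table gives an LCS of length 4
# (one witness is "BDAB"), since l[i][j] extends the diagonal entry exactly
# when x[i - 1] == y[j - 1]:
#
#   ln2, seq2 = longest_common_subsequence('ABCBDAB', 'BDCABA')
#   print('len =', ln2, ', sub-sequence =', seq2)   # len = 4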
| 190
|
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
__SCREAMING_SNAKE_CASE = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ):
A__ = hf_hub_download(
repo_id='''nateraw/video-demo''',filename='''archery.mp4''',repo_type='''dataset''' )
A__ = VideoClassificationPipeline(model=__lowerCamelCase,image_processor=__lowerCamelCase,top_k=2 )
A__ = [
example_video_filepath,
'''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''',
]
return video_classifier, examples
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase ):
for example in examples:
A__ = video_classifier(__lowerCamelCase )
self.assertEqual(
__lowerCamelCase,[
{'''score''': ANY(__lowerCamelCase ), '''label''': ANY(__lowerCamelCase )},
{'''score''': ANY(__lowerCamelCase ), '''label''': ANY(__lowerCamelCase )},
],)
@require_torch
def UpperCamelCase ( self ):
A__ = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification'''
A__ = VideoMAEFeatureExtractor(
size={'''shortest_edge''': 10},crop_size={'''height''': 10, '''width''': 10} )
A__ = pipeline(
'''video-classification''',model=__lowerCamelCase,feature_extractor=__lowerCamelCase,frame_sampling_rate=4 )
A__ = hf_hub_download(repo_id='''nateraw/video-demo''',filename='''archery.mp4''',repo_type='''dataset''' )
A__ = video_classifier(__lowerCamelCase,top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase,decimals=4 ),[{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],)
A__ = video_classifier(
[
video_file_path,
video_file_path,
],top_k=2,)
self.assertEqual(
nested_simplify(__lowerCamelCase,decimals=4 ),[
[{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
[{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
],)
@require_tf
def UpperCamelCase ( self ):
pass
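# A minimal end-to-end sketch of the pipeline exercised above, reusing the
# tiny test checkpoint named in the test itself (any video-classification
# checkpoint works; decord must be installed for video decoding):
#
#   from transformers import pipeline
#   classifier = pipeline(
#       'video-classification',
#       model='hf-internal-testing/tiny-random-VideoMAEForVideoClassification',
#   )
#   classifier('archery.mp4', top_k=2)
#   # -> [{'score': ..., 'label': ...}, {'score': ..., 'label': ...}]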
| 190
| 1
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase: Union[str, Any] = logging.get_logger(__name__)
class a__( lowerCamelCase__ ):
lowercase__ = ["""pixel_values"""]
def __init__( self : str , __snake_case : bool = True , __snake_case : Dict[str, int] = None , __snake_case : float = None , __snake_case : PILImageResampling = PILImageResampling.BILINEAR , __snake_case : bool = True , __snake_case : Union[int, float] = 1 / 2_55 , __snake_case : bool = True , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[float, List[float]]] = None , **__snake_case : str , ):
super().__init__(**__snake_case )
a : Tuple = size if size is not None else {'shortest_edge': 3_84}
a : Optional[int] = get_size_dict(__snake_case , default_to_square=__snake_case )
a : List[str] = do_resize
a : List[Any] = size
# Default value set here for backwards compatibility where the value in config is None
a : int = crop_pct if crop_pct is not None else 2_24 / 2_56
a : str = resample
a : Any = do_rescale
a : Optional[int] = rescale_factor
a : List[str] = do_normalize
a : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase_ ( self : int , __snake_case : np.ndarray , __snake_case : Dict[str, int] , __snake_case : float , __snake_case : PILImageResampling = PILImageResampling.BICUBIC , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : List[Any] , ):
a : List[str] = get_size_dict(__snake_case , default_to_square=__snake_case )
if "shortest_edge" not in size:
raise ValueError(F"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" )
a : List[Any] = size['shortest_edge']
if shortest_edge < 3_84:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
a : Optional[Any] = int(shortest_edge / crop_pct )
a : int = get_resize_output_image_size(__snake_case , size=__snake_case , default_to_square=__snake_case )
a : Union[str, Any] = resize(image=__snake_case , size=__snake_case , resample=__snake_case , data_format=__snake_case , **__snake_case )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=__snake_case , size=(shortest_edge, shortest_edge) , data_format=__snake_case , **__snake_case )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
__snake_case , size=(shortest_edge, shortest_edge) , resample=__snake_case , data_format=__snake_case , **__snake_case )
def lowercase_ ( self : Optional[Any] , __snake_case : np.ndarray , __snake_case : Union[int, float] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : str , ):
return rescale(__snake_case , scale=__snake_case , data_format=__snake_case , **__snake_case )
def lowercase_ ( self : Dict , __snake_case : np.ndarray , __snake_case : Union[float, List[float]] , __snake_case : Union[float, List[float]] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Union[str, Any] , ):
return normalize(__snake_case , mean=__snake_case , std=__snake_case , data_format=__snake_case , **__snake_case )
def lowercase_ ( self : List[str] , __snake_case : ImageInput , __snake_case : bool = None , __snake_case : Dict[str, int] = None , __snake_case : float = None , __snake_case : PILImageResampling = None , __snake_case : bool = None , __snake_case : float = None , __snake_case : bool = None , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[str, TensorType]] = None , __snake_case : ChannelDimension = ChannelDimension.FIRST , **__snake_case : Any , ):
a : List[str] = do_resize if do_resize is not None else self.do_resize
a : List[Any] = crop_pct if crop_pct is not None else self.crop_pct
a : Tuple = resample if resample is not None else self.resample
a : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
a : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
a : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
a : int = image_mean if image_mean is not None else self.image_mean
a : Optional[Any] = image_std if image_std is not None else self.image_std
a : Union[str, Any] = size if size is not None else self.size
a : List[Any] = get_size_dict(__snake_case , default_to_square=__snake_case )
a : Dict = make_list_of_images(__snake_case )
if not valid_images(__snake_case ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_resize and size["shortest_edge"] < 3_84 and crop_pct is None:
raise ValueError('crop_pct must be specified if size < 384.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
a : Dict = [to_numpy_array(__snake_case ) for image in images]
if do_resize:
a : str = [self.resize(image=__snake_case , size=__snake_case , crop_pct=__snake_case , resample=__snake_case ) for image in images]
if do_rescale:
a : List[str] = [self.rescale(image=__snake_case , scale=__snake_case ) for image in images]
if do_normalize:
a : Tuple = [self.normalize(image=__snake_case , mean=__snake_case , std=__snake_case ) for image in images]
a : List[str] = [to_channel_dimension_format(__snake_case , __snake_case ) for image in images]
a : Tuple = {'pixel_values': images}
return BatchFeature(data=__snake_case , tensor_type=__snake_case )
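# Usage sketch for the processor above: with the default shortest_edge=384
# an input is warped straight to 384x384; below 384 the shortest edge is
# first resized to size / crop_pct and then center-cropped, mirroring timm's
# eval transform. The class name below is an assumption -- substitute
# whatever name this module exports:
#
#   import numpy as np
#   processor = ConvNextImageProcessor()  # hypothetical export name
#   image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
#   processor(images=image, return_tensors='np')['pixel_values'].shape
#   # -> (1, 3, 384, 384)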
| 195
|
'''simple docstring'''
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
lowerCAmelCase: Optional[int] = logging.get_logger(__name__)
lowerCAmelCase: Union[str, Any] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'encoder.layer_norm_for_extract': 'layer_norm_for_extract',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'label_embs_concat': 'label_embeddings_concat',
'mask_emb': 'masked_spec_embed',
'spk_proj': 'speaker_proj',
}
lowerCAmelCase: int = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'label_embeddings_concat',
'speaker_proj',
'layer_norm_for_extract',
]
def lowerCamelCase__ ( _A , _A , _A , _A , _A ):
for attribute in key.split('.' ):
a : Tuple = getattr(_A , _A )
if weight_type is not None:
a : Union[str, Any] = getattr(_A , _A ).shape
else:
a : Optional[Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
a : str = value
elif weight_type == "weight_g":
a : Optional[int] = value
elif weight_type == "weight_v":
a : List[Any] = value
elif weight_type == "bias":
a : Any = value
else:
a : Optional[Any] = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def lowerCamelCase__ ( _A , _A ):
a : Optional[int] = []
a : Tuple = fairseq_model.state_dict()
a : List[Any] = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
a : Optional[Any] = False
if "conv_layers" in name:
load_conv_layer(
_A , _A , _A , _A , hf_model.config.feat_extract_norm == 'group' , )
a : int = True
else:
for key, mapped_key in MAPPING.items():
a : Any = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
# special case since naming is very similar
continue
a : str = True
if "*" in mapped_key:
a : Union[str, Any] = name.split(_A )[0].split('.' )[-2]
a : List[str] = mapped_key.replace('*' , _A )
if "weight_g" in name:
a : Optional[int] = 'weight_g'
elif "weight_v" in name:
a : Any = 'weight_v'
elif "bias" in name:
a : str = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
a : List[Any] = 'weight'
else:
a : Any = None
set_recursively(_A , _A , _A , _A , _A )
continue
if not is_used:
unused_weights.append(_A )
logger.warning(f"""Unused weights: {unused_weights}""" )
def lowerCamelCase__ ( _A , _A , _A , _A , _A ):
a : List[str] = full_name.split('conv_layers.' )[-1]
a : Any = name.split('.' )
a : Optional[Any] = int(items[0] )
a : Union[str, Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
a : Optional[int] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
a : Optional[int] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
a : Union[str, Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
a : Union[str, Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_A )
@torch.no_grad()
def lowerCamelCase__ ( _A , _A , _A=None , _A=None , _A=True ):
if config_path is not None:
a : str = UniSpeechSatConfig.from_pretrained(_A )
else:
a : Dict = UniSpeechSatConfig()
a : Any = ''
if is_finetuned:
a : Tuple = UniSpeechSatForCTC(_A )
else:
a : List[str] = UniSpeechSatForPreTraining(_A )
a , a , a : str = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
a : str = model[0].eval()
recursively_load_weights(_A , _A )
hf_wavavec.save_pretrained(_A )
if __name__ == "__main__":
lowerCAmelCase: Tuple = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
lowerCAmelCase: Optional[int] = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
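# Example invocation, using only the flags declared above (paths and the
# script file name are placeholders):
#
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path /path/to/unispeech_sat.pt \
#       --dict_path /path/to/dict \
#       --config_path ./config.json \
#       --pytorch_dump_folder_path ./unispeech-sat-hf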
| 195
| 1
|
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : float | Decimal , lowercase : float = 10**-10 ):
'''simple docstring'''
lowerCamelCase_ = a
while True:
lowerCamelCase_ = Decimal(lowercase ) - (
Decimal(eval(lowercase ) ) / Decimal(eval(str(diff(lowercase ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(lowercase ) ) < precision: # noqa: S307
return float(lowercase )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
print(F"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}""")
    # Find root of log(x) - 1 = 0; the root is e, not the square root of 5
print(F"""The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}""")
# Exponential Roots
print(F"""The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}""")
| 70
|
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
_a : Dict = logging.get_logger(__name__)
_a : str = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
_a : Tuple = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
_a : List[str] = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
_a : Tuple = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
_a : Any = OrderedDict(
[
        # Model for Image classification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
_a : Any = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
_a : List[str] = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
_a : Optional[Any] = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
_a : str = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
_a : str = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
_a : str = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
_a : Optional[Any] = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
_a : Tuple = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
_a : str = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
_a : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
_a : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
_a : List[str] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
_a : Dict = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
_a : Union[str, Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
_a : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
_a : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
_a : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
_a : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
_a : Any = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
_a : Any = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
_a : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
_a : Optional[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
_a : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class __A (_BaseAutoModelClass ):
snake_case :Any = FLAX_MODEL_MAPPING
_a : List[Any] = auto_class_update(FlaxAutoModel)
class __A (_BaseAutoModelClass ):
snake_case :List[Any] = FLAX_MODEL_FOR_PRETRAINING_MAPPING
_a : str = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")
class __A (_BaseAutoModelClass ):
snake_case :Optional[int] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
_a : Optional[Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")
class __A (_BaseAutoModelClass ):
snake_case :List[str] = FLAX_MODEL_FOR_MASKED_LM_MAPPING
_a : Dict = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")
class __A (_BaseAutoModelClass ):
snake_case :str = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_a : Any = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)
class __A (_BaseAutoModelClass ):
snake_case :Optional[int] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_a : Union[str, Any] = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)
class __A (_BaseAutoModelClass ):
snake_case :List[Any] = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
_a : Optional[int] = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")
class __A (_BaseAutoModelClass ):
snake_case :Any = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
_a : int = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="token classification"
)
class __A (_BaseAutoModelClass ):
snake_case :Any = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
_a : Tuple = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")
class __A (_BaseAutoModelClass ):
snake_case :Tuple = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
_a : str = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)
class __A (_BaseAutoModelClass ):
snake_case :Union[str, Any] = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
_a : List[Any] = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="image classification"
)
class __A (_BaseAutoModelClass ):
snake_case :List[str] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
_a : str = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")
class __A (_BaseAutoModelClass ):
snake_case :Tuple = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
_a : Optional[int] = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
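# Typical use of the auto classes registered above (the checkpoint is just an
# example; any architecture present in the corresponding mapping works):
#
#   from transformers import FlaxAutoModelForSequenceClassification
#   model = FlaxAutoModelForSequenceClassification.from_pretrained('bert-base-uncased')
#
# from_pretrained inspects the checkpoint's config class and dispatches
# through the _LazyAutoMapping tables to the matching FlaxBertFor... class.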
| 168
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
__lowercase = 192
__lowercase = 768
__lowercase = 12
__lowercase = 3
__lowercase = [800, 1_333]
__lowercase = False
elif yolos_name == "yolos_s_dWr":
__lowercase = 330
__lowercase = 14
__lowercase = 6
__lowercase = 1_320
elif "yolos_s" in yolos_name:
__lowercase = 384
__lowercase = 1_536
__lowercase = 12
__lowercase = 6
elif "yolos_b" in yolos_name:
__lowercase = [800, 1_344]
__lowercase = 91
__lowercase = """huggingface/label-files"""
__lowercase = """coco-detection-id2label.json"""
__lowercase = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
__lowercase = {int(lowerCamelCase ): v for k, v in idalabel.items()}
__lowercase = idalabel
__lowercase = {v: k for k, v in idalabel.items()}
return config
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase = False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__lowercase = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
__lowercase = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
__lowercase = in_proj_weight[: config.hidden_size, :]
__lowercase = in_proj_bias[: config.hidden_size]
__lowercase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowercase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__lowercase = in_proj_weight[-config.hidden_size :, :]
__lowercase = in_proj_bias[-config.hidden_size :]
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if "backbone" in name:
__lowercase = name.replace("""backbone""" , """vit""" )
if "cls_token" in name:
__lowercase = name.replace("""cls_token""" , """embeddings.cls_token""" )
if "det_token" in name:
__lowercase = name.replace("""det_token""" , """embeddings.detection_tokens""" )
if "mid_pos_embed" in name:
__lowercase = name.replace("""mid_pos_embed""" , """encoder.mid_position_embeddings""" )
if "pos_embed" in name:
__lowercase = name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
__lowercase = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "blocks" in name:
__lowercase = name.replace("""blocks""" , """encoder.layer""" )
if "attn.proj" in name:
__lowercase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
__lowercase = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
__lowercase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__lowercase = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
__lowercase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__lowercase = name.replace("""mlp.fc2""" , """output.dense""" )
if "class_embed" in name:
__lowercase = name.replace("""class_embed""" , """class_labels_classifier""" )
if "bbox_embed" in name:
__lowercase = name.replace("""bbox_embed""" , """bbox_predictor""" )
if "vit.norm" in name:
__lowercase = name.replace("""vit.norm""" , """vit.layernorm""" )
return name
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
__lowercase = orig_state_dict.pop(lowerCamelCase )
if "qkv" in key:
__lowercase = key.split(""".""" )
__lowercase = int(key_split[2] )
__lowercase = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
__lowercase = val[:dim, :]
__lowercase = val[
dim : dim * 2, :
]
__lowercase = val[-dim:, :]
else:
__lowercase = val[:dim]
__lowercase = val[dim : dim * 2]
__lowercase = val[-dim:]
else:
__lowercase = val
return orig_state_dict
def snake_case ( ):
'''simple docstring'''
__lowercase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowercase = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw )
return im
@torch.no_grad()
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = False ):
'''simple docstring'''
__lowercase = get_yolos_config(lowerCamelCase )
# load original state_dict
__lowercase = torch.load(lowerCamelCase , map_location="""cpu""" )["""model"""]
# load 🤗 model
__lowercase = YolosForObjectDetection(lowerCamelCase )
model.eval()
__lowercase = convert_state_dict(lowerCamelCase , lowerCamelCase )
model.load_state_dict(lowerCamelCase )
# Check outputs on an image, prepared by YolosImageProcessor
__lowercase = 800 if yolos_name != """yolos_ti""" else 512
__lowercase = YolosImageProcessor(format="""coco_detection""" , size=lowerCamelCase )
__lowercase = image_processor(images=prepare_img() , return_tensors="""pt""" )
__lowercase = model(**lowerCamelCase )
__lowercase , __lowercase = outputs.logits, outputs.pred_boxes
__lowercase , __lowercase = None, None
if yolos_name == "yolos_ti":
__lowercase = torch.tensor(
[[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
__lowercase = torch.tensor(
[[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
elif yolos_name == "yolos_s_200_pre":
__lowercase = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
__lowercase = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
elif yolos_name == "yolos_s_300_pre":
__lowercase = torch.tensor(
[[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
__lowercase = torch.tensor(
[[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
elif yolos_name == "yolos_s_dWr":
__lowercase = torch.tensor(
[[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
__lowercase = torch.tensor(
[[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
elif yolos_name == "yolos_base":
__lowercase = torch.tensor(
[[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
__lowercase = torch.tensor(
[[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
else:
raise ValueError(F'Unknown yolos_name: {yolos_name}' )
assert torch.allclose(logits[0, :3, :3] , lowerCamelCase , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , lowerCamelCase , atol=1e-4 )
Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
print(F'Saving model {yolos_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowerCamelCase )
if push_to_hub:
__lowercase = {
"""yolos_ti""": """yolos-tiny""",
"""yolos_s_200_pre""": """yolos-small""",
"""yolos_s_300_pre""": """yolos-small-300""",
"""yolos_s_dWr""": """yolos-small-dwr""",
"""yolos_base""": """yolos-base""",
}
print("""Pushing to the hub...""" )
__lowercase = model_mapping[yolos_name]
image_processor.push_to_hub(lowerCamelCase , organization="""hustvl""" )
model.push_to_hub(lowerCamelCase , organization="""hustvl""" )
if __name__ == "__main__":
__UpperCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--yolos_name""",
default="""yolos_s_200_pre""",
type=str,
help=(
"""Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"""
""" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."""
),
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original state dict (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__UpperCamelCase : Any = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
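# Example invocation with the flags declared above (checkpoint path and the
# script file name are placeholders):
#
#   python convert_yolos_to_pytorch.py \
#       --yolos_name yolos_s_200_pre \
#       --checkpoint_path /path/to/yolos_s_200_pre.pth \
#       --pytorch_dump_folder_path ./yolos-small \
#       --push_to_hub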
| 704
|
from __future__ import annotations
from scipy.special import comb # type: ignore
class __UpperCamelCase :
def __init__( self : int , _lowerCAmelCase : list[tuple[float, float]] ) -> Any:
"""simple docstring"""
__lowercase = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
__lowercase = len(_lowerCAmelCase ) - 1
def _a ( self : Tuple , _lowerCAmelCase : float ) -> list[float]:
"""simple docstring"""
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__lowercase = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , _lowerCAmelCase ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(_lowerCAmelCase ) , 5 ) == 1
return output_values
def _a ( self : List[str] , _lowerCAmelCase : float ) -> tuple[float, float]:
"""simple docstring"""
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__lowercase = self.basis_function(_lowerCAmelCase )
__lowercase = 0.0
__lowercase = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def _a ( self : Optional[int] , _lowerCAmelCase : float = 0.01 ) -> Union[str, Any]:
"""simple docstring"""
from matplotlib import pyplot as plt # type: ignore
__lowercase = [] # x coordinates of points to plot
__lowercase = [] # y coordinates of points to plot
__lowercase = 0.0
while t <= 1:
__lowercase = self.bezier_curve_function(_lowerCAmelCase )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
__lowercase = [i[0] for i in self.list_of_points]
__lowercase = [i[1] for i in self.list_of_points]
plt.plot(
_lowerCAmelCase , _lowerCAmelCase , color="""blue""" , label="""Curve of Degree """ + str(self.degree ) , )
plt.scatter(_lowerCAmelCase , _lowerCAmelCase , color="""red""" , label="""Control Points""" )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
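    # Hand check of the degree-2 curve above: at t = 0.5 the Bernstein
    # weights are ((1-t)^2, 2t(1-t), t^2) = (0.25, 0.5, 0.25), so the point is
    # 0.25*(0, 0) + 0.5*(5, 5) + 0.25*(5, 0) = (3.75, 2.5):
    # BezierCurve([(0, 0), (5, 5), (5, 0)]).bezier_curve_function(0.5) == (3.75, 2.5)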
| 53
| 0
|
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
a_ = logging.get_logger(__name__)
def __lowercase ( lowerCamelCase : List[Any] , lowerCamelCase : str ):
UpperCamelCase_ : Tuple = set()
UpperCamelCase_ : Dict = []
def parse_line(lowerCamelCase : int ):
for line in fp:
if isinstance(lowerCamelCase , lowerCamelCase ):
UpperCamelCase_ : Union[str, Any] = line.decode('UTF-8' )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(' ' ):
# process a single warning and move it to `selected_warnings`.
if len(lowerCamelCase ) > 0:
UpperCamelCase_ : Tuple = '\n'.join(lowerCamelCase )
# Only keep the warnings specified in `targets`
if any(F": {x}: " in warning for x in targets ):
selected_warnings.add(lowerCamelCase )
buffer.clear()
continue
else:
UpperCamelCase_ : Optional[Any] = line.strip()
buffer.append(lowerCamelCase )
if from_gh:
for filename in os.listdir(lowerCamelCase ):
UpperCamelCase_ : int = os.path.join(lowerCamelCase , lowerCamelCase )
if not os.path.isdir(lowerCamelCase ):
# read the file
if filename != "warnings.txt":
continue
with open(lowerCamelCase ) as fp:
parse_line(lowerCamelCase )
else:
try:
with zipfile.ZipFile(lowerCamelCase ) as z:
for filename in z.namelist():
if not os.path.isdir(lowerCamelCase ):
# read the file
if filename != "warnings.txt":
continue
with z.open(lowerCamelCase ) as fp:
parse_line(lowerCamelCase )
except Exception:
logger.warning(
F"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped." )
return selected_warnings
def __lowercase ( lowerCamelCase : str , lowerCamelCase : Dict ):
UpperCamelCase_ : Optional[Any] = set()
UpperCamelCase_ : Tuple = [os.path.join(lowerCamelCase , lowerCamelCase ) for p in os.listdir(lowerCamelCase ) if (p.endswith('.zip' ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(lowerCamelCase , lowerCamelCase ) )
return selected_warnings
if __name__ == "__main__":
def __lowercase ( lowerCamelCase : Tuple ):
return values.split(',' )
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
# optional parameters
parser.add_argument(
'--targets',
default='DeprecationWarning,UserWarning,FutureWarning',
type=list_str,
help='Comma-separated list of target warning(s) which we want to extract.',
)
parser.add_argument(
'--from_gh',
action='store_true',
help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
)
a_ = parser.parse_args()
a_ = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
a_ = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('=' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
a_ = extract_warnings(args.output_dir, args.targets)
a_ = sorted(selected_warnings)
with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
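# Example invocation with the arguments declared above (run id and token are
# placeholders; the script file name is an assumption):
#
#   python extract_warnings.py \
#       --workflow_run_id 1234567890 \
#       --output_dir ./warnings_out \
#       --token <token-with-actions:read> \
#       --targets DeprecationWarning,UserWarning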
| 417
|
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
a_ = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
a_ = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
a_ = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case : str , snake_case : Any , snake_case : Optional[Any]=4 , snake_case : str=False ) -> Dict:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = compute_bleu(
reference_corpus=snake_case , translation_corpus=snake_case , max_order=snake_case , smooth=snake_case )
((UpperCamelCase_), (UpperCamelCase_), (UpperCamelCase_), (UpperCamelCase_), (UpperCamelCase_), (UpperCamelCase_)) : int = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 417
| 1
|
from timeit import timeit
def snake_case_ ( __lowercase ):
if number < 0:
raise ValueError('''the value of input must not be negative''' )
UpperCAmelCase_ : Tuple = 0
while number:
number &= number - 1
result += 1
return result
def snake_case_ ( __lowercase ):
if number < 0:
raise ValueError('''the value of input must not be negative''' )
UpperCAmelCase_ : List[Any] = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def snake_case_ ( ):
def do_benchmark(__lowercase ) -> None:
UpperCAmelCase_ : List[Any] = '''import __main__ as z'''
print(F'''Benchmark when {number = }:''' )
print(F'''{get_set_bits_count_using_modulo_operator(__lowercase ) = }''' )
        UpperCAmelCase_ : Any = timeit(F'''z.get_set_bits_count_using_modulo_operator({number})''' , setup=__lowercase )
print(F'''timeit() runs in {timing} seconds''' )
print(F'''{get_set_bits_count_using_brian_kernighans_algorithm(__lowercase ) = }''' )
        UpperCAmelCase_ : str = timeit(
            F'''z.get_set_bits_count_using_brian_kernighans_algorithm({number})''' , setup=__lowercase , )
print(F'''timeit() runs in {timing} seconds''' )
for number in (2_5, 3_7, 5_8, 0):
do_benchmark(__lowercase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
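    # Since Python 3.10 the same count is also available as int.bit_count(),
    # a handy cross-check for both implementations above:
    #
    #   assert (25).bit_count() == 3       # 25 = 0b11001
    #   assert bin(25).count('1') == 3     # pre-3.10 one-liner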
| 641
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : int = logging.get_logger(__name__)
__UpperCamelCase : Union[str, Any] = {
'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : str = 'timesformer'
def __init__( self : int , __snake_case : Any=224 , __snake_case : str=16 , __snake_case : Any=3 , __snake_case : List[Any]=8 , __snake_case : Dict=768 , __snake_case : Dict=12 , __snake_case : Tuple=12 , __snake_case : Dict=3_072 , __snake_case : str="gelu" , __snake_case : Union[str, Any]=0.0 , __snake_case : List[Any]=0.0 , __snake_case : Tuple=0.02 , __snake_case : Optional[Any]=1E-6 , __snake_case : List[Any]=True , __snake_case : List[str]="divided_space_time" , __snake_case : Optional[int]=0 , **__snake_case : Dict , ):
'''simple docstring'''
super().__init__(**__snake_case )
UpperCAmelCase_ : Optional[int] = image_size
UpperCAmelCase_ : Optional[Any] = patch_size
UpperCAmelCase_ : Any = num_channels
UpperCAmelCase_ : int = num_frames
UpperCAmelCase_ : List[Any] = hidden_size
UpperCAmelCase_ : Any = num_hidden_layers
UpperCAmelCase_ : Dict = num_attention_heads
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : str = hidden_act
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : List[str] = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[int] = initializer_range
UpperCAmelCase_ : List[str] = layer_norm_eps
UpperCAmelCase_ : str = qkv_bias
UpperCAmelCase_ : Dict = attention_type
UpperCAmelCase_ : str = drop_path_rate
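# A minimal sketch of instantiating the config above via its public
# transformers name (the keyword arguments mirror the signature shown):
#
#   from transformers import TimesformerConfig
#   config = TimesformerConfig(num_frames=16, attention_type='divided_space_time')
#   config.num_frames  # -> 16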
| 641
| 1
|
"""simple docstring"""
import unittest
from transformers import DonutProcessor
__lowerCamelCase = "naver-clova-ix/donut-base"
class _lowercase ( unittest.TestCase ):
def lowerCAmelCase__ ( self ):
__magic_name__ = DonutProcessor.from_pretrained(UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
__magic_name__ = {
'''name''': '''John Doe''',
'''age''': '''99''',
'''city''': '''Atlanta''',
'''state''': '''GA''',
'''zip''': '''30301''',
'''phone''': '''123-4567''',
'''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
}
__magic_name__ = (
'''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
'''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
'''<s_nicknames><s_nickname>Johnny</s_nickname>'''
'''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
)
        __magic_name__ = self.processor.token2json(UpperCamelCase_ )
self.assertDictEqual(UpperCamelCase_ , UpperCamelCase_ )
| 490
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class ConvNextImageProcessor(BaseImageProcessor):
    r"""
    Image processor with ConvNeXt-style resizing: below 384 pixels the shortest edge is resized to
    `shortest_edge / crop_pct` and the image is center-cropped; at 384 and above the image is
    resized (warped) directly to `(shortest_edge, shortest_edge)`.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, crop_pct: Optional[float] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None, **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self, image: np.ndarray, size: Dict[str, int], crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(
        self, image: np.ndarray, scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None,
        crop_pct: Optional[float] = None, resample: Optional[PILImageResampling] = None,
        do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # Note: parentheses fix an operator-precedence bug in the original check.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
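# Minimal usage sketch (illustrative; the file name and expected shape below are assumptions,
# not part of this module):
#     processor = ConvNextImageProcessor()
#     inputs = processor(images=PIL.Image.open("cat.png"), return_tensors="np")
#     inputs["pixel_values"].shape  # (1, 3, 384, 384) with the default shortest_edge of 384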
| 314
| 0
|
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        # Note: the original `if "fc2" and "experts" not in key` always evaluated the first
        # operand as truthy; membership checks are made explicit here.
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict


def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
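# The index written above follows the standard sharded-checkpoint layout: WEIGHTS_INDEX_NAME is a
# JSON file holding a "metadata" block (total parameter byte size) and a "weight_map" that maps
# each parameter name to the shard file containing it.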
if __name__ == "__main__":
__lowerCAmelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
__lowerCAmelCase =parser.parse_args()
__lowerCAmelCase =shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
__lowerCAmelCase =NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
__lowerCAmelCase =NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 719
|
from typing import Any


def mode(input_list: list) -> list[Any]:
    """Return a sorted list of the most frequently occurring value(s) in input_list.

    >>> mode([2, 2, 3, 4, 4])
    [2, 4]
    >>> mode([])
    []
    """
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 405
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}


class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self, vocab_size=65_024, hidden_size=4_544, num_hidden_layers=32, num_attention_heads=71,
        layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, hidden_dropout=0.0,
        attention_dropout=0.0, num_kv_heads=None, alibi=False, new_decoder_architecture=False,
        multi_query=True, parallel_attn=True, bias=False, bos_token_id=11, eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
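# Worked example (illustrative): with the defaults above, head_dim == 4544 // 71 == 64,
# and `rotary` is True exactly when alibi position embeddings are disabled.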
| 241
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__UpperCAmelCase : Optional[Any] = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : List[Any] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
"GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXForCausalLM",
"GPTNeoXForQuestionAnswering",
"GPTNeoXForSequenceClassification",
"GPTNeoXForTokenClassification",
"GPTNeoXLayer",
"GPTNeoXModel",
"GPTNeoXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
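# This is the standard lazy-import pattern: under TYPE_CHECKING the real symbols are imported for
# static analysis, while at runtime the module is swapped for a _LazyModule that resolves
# attributes from _import_structure on first access.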
| 241
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor


class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30,
        max_resolution=400, do_resize=True, size=None, apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        expected_words = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']]  # noqa: E231
        expected_boxes = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15],
        [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]]  # noqa: E231
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 121
|
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=64, num_channels=3, num_encoder_blocks=4,
        depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[16, 32, 64, 1_28],
        downsampling_rates=[1, 4, 8, 16], num_attention_heads=[1, 2, 4, 8], is_training=True,
        use_labels=True, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )

    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_for_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
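# Note: post_process_semantic_segmentation upsamples the low-resolution logits to each entry of
# `target_sizes` (here 500x300) before taking the per-pixel argmax; without target sizes the
# segmentation map keeps the logits' own 128x128 resolution.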
| 121
| 1
|
from collections.abc import Callable
class Heap:
    """A max-heap of [item, score] pairs with a position map for O(log n) updates and deletions."""

    def __init__(self, key=None) -> None:
        # Stores heap items as [item, score] pairs.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key: Callable = key or (lambda x: x)

    def _parent(self, i: int):
        """Returns parent index of given index if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int):
        """Returns left-child index of given index if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int):
        """Returns right-child index of given index if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        # First update the indexes in the position map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        """Compares the scores of the two items at indexes i and j."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """Returns the index that should be the parent among index i and its children."""
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        """Fixes the heap in the upward direction of the given index."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        """Fixes the heap in the downward direction of the given index."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item, item_value) -> None:
        """Updates the given item's value in the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item) -> None:
        """Deletes the given item from the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item, item_value) -> None:
        """Inserts the given item with the given value into the heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self):
        """Returns the top [item, score] pair from the heap if present."""
        return self.arr[0] if self.size else None

    def extract_top(self):
        """Returns and removes the top [item, score] pair from the heap if present."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    """
    >>> h = Heap()  # Max-heap
    >>> h.insert_item(5, 34)
    >>> h.insert_item(6, 31)
    >>> h.insert_item(7, 37)
    >>> h.get_top()
    [7, 37]
    >>> h.extract_top()
    [7, 37]
    >>> h.get_top()
    [5, 34]
    """
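# Note (illustrative): passing key=lambda x: -x at construction flips the ordering, turning this
# max-heap into a min-heap over the original values (the stored scores are then negated).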
if __name__ == "__main__":
import doctest
doctest.testmod()
| 551
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 144
| 0
|
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """Builds a map from TensorFlow variable names to PyTorch parameters."""
    tf_to_pt_map = {}

    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Loads TensorFlow checkpoint weights into a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions.")
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Applies TensorFlow-style "SAME" padding to a convolution layer's input."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
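# Worked example (illustrative): a 3x3 kernel with stride 2 on a 224x224 input gives
# pad_along_height = max(3 - 2, 0) = 1, i.e. padding (left, right, top, bottom) = (0, 1, 0, 1),
# matching TensorFlow's "SAME" convention of placing the extra pixel on the bottom/right.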
class MobileNetV1ConvLayer(nn.Module):
    def __init__(
        self, config: MobileNetV1Config, in_channels: int, out_channels: int, kernel_size: int,
        stride: Optional[int] = 1, groups: Optional[int] = 1, bias: bool = False,
        use_normalization: Optional[bool] = True, use_activation: Optional[bool or str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetV1PreTrainedModel(PreTrainedModel):
    """Handles weights initialization and provides the interface for loading pretrained models."""

    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r'\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
MOBILENET_V1_INPUTS_DOCSTRING = r'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`MobileNetV1ImageProcessor.__call__`] for details.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    'The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.',
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaModel(MobileNetVaPreTrainedModel):
    '''simple docstring'''

    def __init__(self, config: MobileNetVaConfig, add_pooling_layer: bool = True) -> None:
        """simple docstring"""
        super().__init__(config)
        self.config = config
        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)
        # Stem: a regular 3x3 convolution with stride 2.
        self.conv_stem = MobileNetVaConvLayer(
            config, in_channels=config.num_channels, out_channels=out_channels, kernel_size=3, stride=2)
        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)
            # Depthwise 3x3 convolution (groups == in_channels) ...
            self.layer.append(
                MobileNetVaConvLayer(
                    config, in_channels=in_channels, out_channels=in_channels,
                    kernel_size=3, stride=strides[i], groups=in_channels))
            # ... followed by a pointwise 1x1 convolution.
            self.layer.append(
                MobileNetVaConvLayer(
                    config, in_channels=in_channels, out_channels=out_channels, kernel_size=1))
        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None
        # Initialize weights and apply final processing
        self.post_init()
    def _prune_heads(self, heads_to_prune):
        """simple docstring"""
        raise NotImplementedError
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        """simple docstring"""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")
        hidden_states = self.conv_stem(pixel_values)
        all_hidden_states = () if output_hidden_states else None
        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        last_hidden_state = hidden_states
        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None
        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=all_hidden_states)
@add_start_docstrings(
    '\n    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ',
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
    '''simple docstring'''

    def __init__(self, config: MobileNetVaConfig) -> None:
        """simple docstring"""
        super().__init__(config)
        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)
        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels
        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        """simple docstring"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(self.dropout(pooled_output))
        loss = None
        if labels is not None:
            # Infer the problem type from num_labels and the label dtype when it is not set explicitly.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states)
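# Minimal usage sketch (comments only; assumes the public "google/mobilenet_v1_1.0_224"
# checkpoint and the AutoImageProcessor API -- adjust both to whatever you actually load):
#
#     from transformers import AutoImageProcessor
#     processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#     model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#     inputs = processor(images=image, return_tensors="pt")
#     predicted_label = model(**inputs).logits.argmax(-1).item()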
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile('[^A-Za-z_0-9]')
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens):
    """Compute a MinHash signature for a token list, or None if it is too short."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code):
    """Split code on non-alphanumeric characters and drop empty tokens."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    '''simple docstring'''

    def __init__(self, *, duplication_jaccard_threshold: float = 0.85) -> None:
        """simple docstring"""
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Add a key/MinHash pair, clustering it with any near-duplicates already indexed."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        """simple docstring"""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        """simple docstring"""
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator, jaccard_threshold):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1, code2):
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
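# Worked example (comments only): get_tokens("a b c") == {"a", "b", "c"} and
# get_tokens("b c d") == {"b", "c", "d"}; the intersection has 2 elements and the
# union has 4, so jaccard_similarity("a b c", "b c d") == 0.5.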
_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Keep one representative ("extreme") per group of near-duplicates inside a cluster."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset, jaccard_threshold=0.85):
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")
    return ds_filter, duplicate_clusters
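# Minimal usage sketch (comments only; the dataset name is hypothetical -- any
# `datasets.Dataset` with "content", "repo_name" and "path" columns works):
#
#     from datasets import load_dataset
#     ds = load_dataset("my-org/my-code-dataset", split="train")
#     ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)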
'''simple docstring'''
import pprint
import requests
API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + '/today').json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + '/random').json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
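# Hedged note (comments): at the time of writing, zenquotes.io returns a JSON list of
# entries shaped like {"q": <quote>, "a": <author>, "h": <HTML rendering>}, but treat
# that schema as an assumption and inspect a live response before relying on it.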
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase = ViTImageProcessor if is_vision_available() else None
@property
def __snake_case ( self : List[str]) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        image_processor_map = {
            'do_normalize': False,
            'do_resize': True,
            'image_processor_type': 'ViTImageProcessor',
            'resample': 3,
            'size': {'height': 32, 'width': 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a single PIL image input."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
def __snake_case ( self : Optional[Any]) -> List[Any]:
A_ = self.get_tokenizer()
A_ = self.get_image_processor()
A_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
processor.save_pretrained(self.tmpdirname)
A_ = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_lowercase)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.char_tokenizer , _lowercase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor.image_processor , _lowercase)
def __snake_case ( self : Union[str, Any]) -> Optional[Any]:
A_ = self.get_tokenizer()
A_ = self.get_image_processor()
A_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
processor.save_pretrained(self.tmpdirname)
A_ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
A_ = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0)
A_ = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_lowercase , padding_value=1.0)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.char_tokenizer , _lowercase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _lowercase)
def __snake_case ( self : List[Any]) -> str:
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
A_ = self.prepare_image_inputs()
A_ = image_processor(_lowercase , return_tensors='np')
A_ = processor(images=_lowercase , return_tensors='np')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
def __snake_case ( self : Any) -> str:
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
A_ = 'test'
A_ = processor(text=_lowercase)
A_ = tokenizer(_lowercase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def __snake_case ( self : str) -> Dict:
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
A_ = 'test'
A_ = self.prepare_image_inputs()
A_ = processor(text=_lowercase , images=_lowercase)
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'labels'])
# test if it raises when no input is passed
with pytest.raises(_lowercase):
processor()
def __snake_case ( self : Union[str, Any]) -> Optional[int]:
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
A_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
A_ = processor.char_decode(_lowercase)
A_ = tokenizer.batch_decode(_lowercase)
A_ = [seq.replace(' ' , '') for seq in decoded_tok]
self.assertListEqual(_lowercase , _lowercase)
def __snake_case ( self : List[str]) -> str:
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
A_ = None
A_ = self.prepare_image_inputs()
A_ = processor(text=_lowercase , images=_lowercase)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
def __snake_case ( self : List[str]) -> Any:
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
A_ = torch.randn(1 , 27 , 38)
A_ = torch.randn(1 , 27 , 50_257)
A_ = torch.randn(1 , 27 , 30_522)
A_ = processor.batch_decode([char_input, bpe_input, wp_input])
self.assertListEqual(list(results.keys()) , ['generated_text', 'scores', 'char_preds', 'bpe_preds', 'wp_preds'])
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
    '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
    '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
    '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
    '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
    '''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class RobertaConfig(PretrainedConfig):
    """simple docstring"""
    model_type = """roberta"""

    def __init__(self, vocab_size=50_265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
                 initializer_range=0.02, layer_norm_eps=1E-12, pad_token_id=1, bos_token_id=0, eos_token_id=2,
                 position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ])
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class InstructBlipVisionConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "instructblip_vision_model"

    def __init__(self, hidden_size=1_408, intermediate_size=6_144, num_hidden_layers=39, num_attention_heads=16,
                 image_size=224, patch_size=14, hidden_act="gelu", layer_norm_eps=1E-6, attention_dropout=0.0,
                 initializer_range=1E-10, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get('model_type') == "instructblip":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "instructblip_qformer"

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02,
                 layer_norm_eps=1E-12, pad_token_id=0, position_embedding_type="absolute",
                 cross_attention_frequency=2, encoder_hidden_size=1_408, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get('model_type') == "instructblip":
            config_dict = config_dict['qformer_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.')
        if qformer_config is None:
            qformer_config = {}
            logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.')
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).')
        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config['model_type'] if 'model_type' in text_config else 'opt'
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"""
),
"""google/electra-base-generator""": """https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt""",
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"""
),
"""google/electra-base-generator""": (
"""https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"""
),
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/electra-small-generator""": 512,
"""google/electra-base-generator""": 512,
"""google/electra-large-generator""": 512,
"""google/electra-small-discriminator""": 512,
"""google/electra-base-discriminator""": 512,
"""google/electra-large-discriminator""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""google/electra-small-generator""": {"""do_lower_case""": True},
"""google/electra-base-generator""": {"""do_lower_case""": True},
"""google/electra-large-generator""": {"""do_lower_case""": True},
"""google/electra-small-discriminator""": {"""do_lower_case""": True},
"""google/electra-base-discriminator""": {"""do_lower_case""": True},
"""google/electra-large-discriminator""": {"""do_lower_case""": True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs) -> None:
        """simple docstring"""
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        # Rebuild the backend normalizer if any of the normalization options changed.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("""lowercase""", do_lower_case) != do_lower_case
            or normalizer_state.get("""strip_accents""", strip_accents) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("""type"""))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int],
                                             token_ids_1: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
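# Worked example (comments only): for token_ids_0 = [5, 6] and token_ids_1 = [7], the
# pair is laid out as [CLS] 5 6 [SEP] 7 [SEP], so
# create_token_type_ids_from_sequences([5, 6], [7]) returns [0, 0, 0, 0, 1, 1].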
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = """gptj"""
    attribute_map = {
        """max_position_embeddings""": """n_positions""",
        """hidden_size""": """n_embd""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }
    def __init__(self, vocab_size=50_400, n_positions=2_048, n_embd=4_096, n_layer=28, n_head=16, rotary_dim=64,
                 n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0,
                 layer_norm_epsilon=1E-5, initializer_range=0.02, use_cache=True, bos_token_id=50_256,
                 eos_token_id=50_256, tie_word_embeddings=False, **kwargs):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(self, config, task="default", patching_specs=None, use_past=False):
        """simple docstring"""
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, 'pad_token_id', None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self):
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs

    @property
    def num_layers(self):
        return self._config.n_layer

    @property
    def num_attention_heads(self):
        return self._config.n_head

    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
        """simple docstring"""
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        # We need to order the input in the way they appear in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch

                batch, seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
        return ordered_inputs

    @property
    def default_onnx_opset(self):
        return 13
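# Hedged usage note (comments): with the properties above, an export along the lines of
# `python -m transformers.onnx --model=EleutherAI/gpt-j-6B onnx/` would pick up this
# config, and `generate_dummy_inputs` is what supplies `past_key_values` during tracing.
# Treat the exact CLI invocation as an assumption for your installed transformers version.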
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class snake_case__( unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase_ ( self ) -> Tuple:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
lowerCAmelCase_ : Optional[int] = AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
lowerCAmelCase_ : Dict = TFAutoModel.from_pretrained(__lowercase , from_pt=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
lowerCAmelCase_ : Tuple = AutoModel.from_pretrained(__lowercase , from_tf=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@slow
def lowercase_ ( self ) -> List[str]:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
lowerCAmelCase_ : Dict = AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
lowerCAmelCase_ : List[str] = TFAutoModelForPreTraining.from_pretrained(__lowercase , from_pt=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
lowerCAmelCase_ : Tuple = AutoModelForPreTraining.from_pretrained(__lowercase , from_tf=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@slow
def lowercase_ ( self ) -> Dict:
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : List[str] = AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
lowerCAmelCase_ : List[Any] = TFAutoModelForCausalLM.from_pretrained(__lowercase , from_pt=__lowercase )
lowerCAmelCase_ , lowerCAmelCase_ : List[str] = TFAutoModelForCausalLM.from_pretrained(
__lowercase , output_loading_info=__lowercase , from_pt=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
lowerCAmelCase_ : str = AutoModelForCausalLM.from_pretrained(__lowercase , from_tf=__lowercase )
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = AutoModelForCausalLM.from_pretrained(
__lowercase , output_loading_info=__lowercase , from_tf=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@slow
def lowercase_ ( self ) -> Optional[int]:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : List[Any] = AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
lowerCAmelCase_ : str = TFAutoModelWithLMHead.from_pretrained(__lowercase , from_pt=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
lowerCAmelCase_ : Dict = AutoModelWithLMHead.from_pretrained(__lowercase , from_tf=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@slow
def lowercase_ ( self ) -> Any:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : str = AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
lowerCAmelCase_ : int = TFAutoModelForMaskedLM.from_pretrained(__lowercase , from_pt=__lowercase )
lowerCAmelCase_ , lowerCAmelCase_ : Dict = TFAutoModelForMaskedLM.from_pretrained(
__lowercase , output_loading_info=__lowercase , from_pt=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
lowerCAmelCase_ : Tuple = AutoModelForMaskedLM.from_pretrained(__lowercase , from_tf=__lowercase )
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = AutoModelForMaskedLM.from_pretrained(
__lowercase , output_loading_info=__lowercase , from_tf=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@slow
def lowercase_ ( self ) -> List[str]:
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : Union[str, Any] = AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
lowerCAmelCase_ : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(__lowercase , from_pt=__lowercase )
lowerCAmelCase_ , lowerCAmelCase_ : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(
__lowercase , output_loading_info=__lowercase , from_pt=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
lowerCAmelCase_ : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(__lowercase , from_tf=__lowercase )
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(
__lowercase , output_loading_info=__lowercase , from_tf=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@slow
def lowercase_ ( self ) -> int:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
lowerCAmelCase_ : Tuple = AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
lowerCAmelCase_ : Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained(__lowercase , from_pt=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
lowerCAmelCase_ : Dict = AutoModelForSequenceClassification.from_pretrained(__lowercase , from_tf=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@slow
def lowercase_ ( self ) -> str:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
lowerCAmelCase_ : int = AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
lowerCAmelCase_ : int = TFAutoModelForQuestionAnswering.from_pretrained(__lowercase , from_pt=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
lowerCAmelCase_ : Any = AutoModelForQuestionAnswering.from_pretrained(__lowercase , from_tf=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)
        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)
        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)
def solution(n: int = 1_000_000) -> int:
    """Project Euler 14: find the start below `n` with the longest Collatz chain."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for input1 in range(2, n):
        counter = 0
        number = input1
        while True:
            # Reuse the memoised chain length as soon as a known value is reached.
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
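# Worked example (comments): the chain for 13 is 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8
# -> 4 -> 2 -> 1, i.e. 10 terms, so counters[13] == 10 once 13 has been processed; the
# memoisation above is what keeps the search over a million starting values fast.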
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, spectrogram_length=2048,
                 feature_size=128, num_audio_channels=1, hop_length=512, chunk_length=30, sampling_rate=44100):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict(self):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))
def __lowercase ( self : List[Any] ) -> Dict:
snake_case : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case : List[str] = feat_extract_first.save_pretrained(lowerCamelCase__ )[0]
check_json_file_has_correct_format(lowerCamelCase__ )
snake_case : str = self.feature_extraction_class.from_pretrained(lowerCamelCase__ )
snake_case : Union[str, Any] = feat_extract_first.to_dict()
snake_case : Optional[Any] = feat_extract_second.to_dict()
snake_case : Optional[int] = dict_first.pop("mel_filters" )
snake_case : str = dict_second.pop("mel_filters" )
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def __lowercase ( self : Union[str, Any] ) -> str:
snake_case : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case : Any = os.path.join(lowerCamelCase__ , "feat_extract.json" )
feat_extract_first.to_json_file(lowerCamelCase__ )
snake_case : List[Any] = self.feature_extraction_class.from_json_file(lowerCamelCase__ )
snake_case : Optional[int] = feat_extract_first.to_dict()
snake_case : List[Any] = feat_extract_second.to_dict()
snake_case : str = dict_first.pop("mel_filters" )
snake_case : Optional[Any] = dict_second.pop("mel_filters" )
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def __lowercase ( self : Any ) -> List[str]:
snake_case : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
snake_case : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
snake_case : Any = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
# Test not batched input
snake_case : List[str] = feature_extractor(np_speech_inputs[0] , return_tensors="np" , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
snake_case : Any = feature_extractor(lowerCamelCase__ , return_tensors="np" , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
snake_case : Optional[Any] = feature_extractor(
lowerCamelCase__ , return_tensors="np" , sampling_rate=44100 , mask_audio=lowerCamelCase__ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
snake_case : str = [floats_list((1, x) )[0] for x in (800, 800, 800)]
snake_case : List[Any] = np.asarray(lowerCamelCase__ )
snake_case : Optional[int] = feature_extractor(lowerCamelCase__ , return_tensors="np" , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def __lowercase ( self : int , _lowercase : Union[str, Any] ) -> List[Any]:
snake_case : Optional[int] = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
snake_case : Optional[Any] = ds.sort("id" ).select(range(lowerCamelCase__ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def __lowercase ( self : str ) -> Tuple:
snake_case : Any = self._load_datasamples(1 )
snake_case : Optional[Any] = TvltFeatureExtractor()
snake_case : Optional[Any] = feature_extractor(lowerCamelCase__ , return_tensors="pt" ).audio_values
self.assertEquals(audio_values.shape , (1, 1, 192, 128) )
snake_case : Any = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , lowerCamelCase__ , atol=1E-4 ) )
'''simple docstring'''
def kth_permutation(k, n):
    """Return the k-th (0-indexed) lexicographic permutation of range(n)."""
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"
    permutation = []
    elements = list(range(n))
    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])
    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
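# Worked example (comments): the lexicographic permutations of range(4) start
# [0,1,2,3], [0,1,3,2], [0,2,1,3], [0,2,3,1], [0,3,1,2], ... so
# kth_permutation(4, 4) returns [0, 3, 1, 2].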
def fizz_buzz(number: int, iterations: int) -> str:
    """Play FizzBuzz from `number` up to and including `iterations`."""
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")
    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
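# Worked example (comments): fizz_buzz(1, 15) returns
# "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz " -- note the trailing
# space appended after every entry.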
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class _lowerCAmelCase ( __a ):
_lowercase ='''transfo-xl'''
_lowercase =['''mems''']
_lowercase ={
'''n_token''': '''vocab_size''',
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"""The model {self.model_type} is one of the few models that has no sequence length limit."""
        )
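# Hedged usage sketch (assumes the reconstructed TransfoXLConfig above): the
# adaptive-softmax projection tying is derived in __init__, so with the default
# three cutoffs all but the first projection are tied.
#
#   config = TransfoXLConfig()
#   assert config.tie_projs == [False, True, True, True]
#   config.hidden_size == config.d_model  # resolved through attribute_map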
| 279
| 0
|
def combination_util(arr, n, r, index, data, i):
    """Recursively print all combinations of size r from arr[0..n-1]."""
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r):
    # A temporary array to store one combination at a time
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
# Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
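    # Hedged check of the expected output: with arr = [10, 20, 30, 40, 50] and
    # r = 3, the first combinations printed are "10 20 30", "10 20 40",
    # "10 20 50", "10 30 40", ... (C(5, 3) = 10 lines in total).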
| 250
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"""Found {torch.cuda.device_count()} devices.""")
        cmd = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 250
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_efficientnet": [
"EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientNetConfig",
"EfficientNetOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
"EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientNetForImageClassification",
"EfficientNetModel",
"EfficientNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
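# Hedged usage note on the _LazyModule pattern above: heavyweight submodules are
# only imported on first attribute access, so type checkers follow the eager
# TYPE_CHECKING branch while runtime users pay the import cost lazily, e.g.
#
#   from transformers.models.efficientnet import EfficientNetConfig  # triggers the lazy import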
| 707
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def A ( self : int )-> Union[str, Any]:
__UpperCamelCase = tempfile.mkdtemp()
# fmt: off
__UpperCamelCase = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
__UpperCamelCase = dict(zip(A_ , range(len(A_ ) ) ) )
__UpperCamelCase = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
__UpperCamelCase = {"unk_token": "<unk>"}
__UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(A_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(A_ ) )
__UpperCamelCase = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
__UpperCamelCase = os.path.join(self.tmpdirname , A_ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(A_ , A_ )
def A ( self : Dict , **A_ : List[str] )-> List[Any]:
return CLIPTokenizer.from_pretrained(self.tmpdirname , **A_ )
def A ( self : Optional[int] , **A_ : Any )-> Tuple:
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **A_ )
def A ( self : Any , **A_ : List[Any] )-> Optional[int]:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **A_ )
def A ( self : Tuple )-> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def A ( self : int )-> str:
__UpperCamelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
__UpperCamelCase = [Image.fromarray(np.moveaxis(A_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A ( self : List[Any] )-> Optional[Any]:
__UpperCamelCase = self.get_tokenizer()
__UpperCamelCase = self.get_rust_tokenizer()
__UpperCamelCase = self.get_image_processor()
__UpperCamelCase = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ )
processor_slow.save_pretrained(self.tmpdirname )
__UpperCamelCase = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=A_ )
__UpperCamelCase = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ )
processor_fast.save_pretrained(self.tmpdirname )
__UpperCamelCase = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , A_ )
self.assertIsInstance(processor_fast.tokenizer , A_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , A_ )
self.assertIsInstance(processor_fast.image_processor , A_ )
def A ( self : Dict )-> Dict:
__UpperCamelCase = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__UpperCamelCase = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
__UpperCamelCase = self.get_image_processor(do_normalize=A_ , padding_value=1.0 )
__UpperCamelCase = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=A_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A_ )
def A ( self : int )-> Any:
__UpperCamelCase = self.get_image_processor()
__UpperCamelCase = self.get_tokenizer()
__UpperCamelCase = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ )
__UpperCamelCase = self.prepare_image_inputs()
__UpperCamelCase = image_processor(A_ , return_tensors="np" )
__UpperCamelCase = processor(images=A_ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def A ( self : int )-> Union[str, Any]:
__UpperCamelCase = self.get_image_processor()
__UpperCamelCase = self.get_tokenizer()
__UpperCamelCase = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ )
__UpperCamelCase = "lower newer"
__UpperCamelCase = processor(text=A_ )
__UpperCamelCase = tokenizer(A_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def A ( self : List[Any] )-> List[Any]:
__UpperCamelCase = self.get_image_processor()
__UpperCamelCase = self.get_tokenizer()
__UpperCamelCase = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ )
__UpperCamelCase = "lower newer"
__UpperCamelCase = self.prepare_image_inputs()
__UpperCamelCase = processor(text=A_ , images=A_ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(A_ ):
processor()
def A ( self : Union[str, Any] )-> Union[str, Any]:
__UpperCamelCase = self.get_image_processor()
__UpperCamelCase = self.get_tokenizer()
__UpperCamelCase = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ )
__UpperCamelCase = self.prepare_image_inputs()
__UpperCamelCase = self.prepare_image_inputs()
__UpperCamelCase = processor(images=A_ , visual_prompt=A_ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "conditional_pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(A_ ):
processor()
def A ( self : Optional[int] )-> int:
__UpperCamelCase = self.get_image_processor()
__UpperCamelCase = self.get_tokenizer()
__UpperCamelCase = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ )
__UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__UpperCamelCase = processor.batch_decode(A_ )
__UpperCamelCase = tokenizer.batch_decode(A_ )
self.assertListEqual(A_ , A_ )
| 228
| 0
|
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"""
""" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"""
""" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.""",
"""The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"""
""" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"""
""" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"""
""" body.""",
"""Amnesty International releases its annual report on the death penalty. The report catalogs the use of"""
""" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"""
""" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"""
""" punishment.""",
]
TGT = [
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."""
""" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"""
""" had informed his Lufthansa training school of an episode of severe depression, airline says .""",
"""Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."""
""" Israel and the United States opposed the move, which could open the door to war crimes investigations against"""
""" Israelis .""",
"""Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"""
""" death . Organization claims that governments around the world are using the threat of terrorism to advance"""
""" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"""
""" sentences up by 28% .""",
]
def test_disaggregated_scores_are_determinstic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_rouge2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_rouge2["rouge2"]).fmeasure.mean()
    )


def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep


def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)


def test_pegasus_newline():
    pred = [
        '" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .'
        '<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '
    ]
    tgt = [
        ' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
        ' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says .'
        " Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score


def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
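# Hedged usage sketch (assumes the local `utils.calculate_rouge` wrapper imported
# above): with the default bootstrap_aggregation=True the result maps each rouge
# key to an aggregated F-measure rather than per-example scores.
#
#   scores = calculate_rouge(PRED, TGT, rouge_keys=["rouge1", "rougeL"])
#   scores["rouge1"]  # aggregated F1 for ROUGE-1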
| 210
|
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
| 210
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase__ ( UpperCamelCase__ , unittest.TestCase ):
a : List[str] = UnCLIPImageVariationPipeline
a : str = IMAGE_VARIATION_PARAMS - {"""height""", """width""", """guidance_scale"""}
a : List[str] = IMAGE_VARIATION_BATCH_PARAMS
a : int = [
"""generator""",
"""return_dict""",
"""decoder_num_inference_steps""",
"""super_res_num_inference_steps""",
]
a : int = False
@property
def UpperCAmelCase_ ( self ) -> Dict:
return 32
@property
def UpperCAmelCase_ ( self ) -> str:
return 32
@property
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
return self.time_input_dim
@property
def UpperCAmelCase_ ( self ) -> int:
return self.time_input_dim * 4
@property
def UpperCAmelCase_ ( self ) -> List[Any]:
return 100
@property
def UpperCAmelCase_ ( self ) -> Tuple:
__lowerCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def UpperCAmelCase_ ( self ) -> int:
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(UpperCamelCase )
@property
def UpperCAmelCase_ ( self ) -> str:
torch.manual_seed(0 )
__lowerCAmelCase = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(UpperCamelCase )
@property
def UpperCAmelCase_ ( self ) -> Optional[Any]:
torch.manual_seed(0 )
__lowerCAmelCase = {
"clip_embeddings_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"cross_attention_dim": self.cross_attention_dim,
}
__lowerCAmelCase = UnCLIPTextProjModel(**UpperCamelCase )
return model
@property
def UpperCAmelCase_ ( self ) -> int:
torch.manual_seed(0 )
__lowerCAmelCase = {
"sample_size": 32,
# RGB in channels
"in_channels": 3,
# Out channels is double in channels because predicts mean and variance
"out_channels": 6,
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": "identity",
}
__lowerCAmelCase = UNetaDConditionModel(**UpperCamelCase )
return model
@property
def UpperCAmelCase_ ( self ) -> Tuple:
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def UpperCAmelCase_ ( self ) -> Dict:
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def UpperCAmelCase_ ( self ) -> List[Any]:
# seeded differently to get different unet than `self.dummy_super_res_first`
torch.manual_seed(1 )
__lowerCAmelCase = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def UpperCAmelCase_ ( self ) -> Optional[int]:
__lowerCAmelCase = self.dummy_decoder
__lowerCAmelCase = self.dummy_text_proj
__lowerCAmelCase = self.dummy_text_encoder
__lowerCAmelCase = self.dummy_tokenizer
__lowerCAmelCase = self.dummy_super_res_first
__lowerCAmelCase = self.dummy_super_res_last
__lowerCAmelCase = UnCLIPScheduler(
variance_type="learned_range" , prediction_type="epsilon" , num_train_timesteps=1000 , )
__lowerCAmelCase = UnCLIPScheduler(
variance_type="fixed_small_log" , prediction_type="epsilon" , num_train_timesteps=1000 , )
__lowerCAmelCase = CLIPImageProcessor(crop_size=32 , size=32 )
__lowerCAmelCase = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase=0 , UpperCamelCase=True ) -> Optional[int]:
__lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase )
if str(UpperCamelCase ).startswith("mps" ):
__lowerCAmelCase = torch.manual_seed(UpperCamelCase )
else:
__lowerCAmelCase = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase )
if pil_image:
__lowerCAmelCase = input_image * 0.5 + 0.5
__lowerCAmelCase = input_image.clamp(0 , 1 )
__lowerCAmelCase = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
__lowerCAmelCase = DiffusionPipeline.numpy_to_pil(UpperCamelCase )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def UpperCAmelCase_ ( self ) -> str:
__lowerCAmelCase = "cpu"
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = self.pipeline_class(**UpperCamelCase )
__lowerCAmelCase = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
__lowerCAmelCase = self.get_dummy_inputs(UpperCamelCase , pil_image=UpperCamelCase )
__lowerCAmelCase = pipe(**UpperCamelCase )
__lowerCAmelCase = output.images
__lowerCAmelCase = self.get_dummy_inputs(UpperCamelCase , pil_image=UpperCamelCase )
__lowerCAmelCase = pipe(
**UpperCamelCase , return_dict=UpperCamelCase , )[0]
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase = np.array(
[
0.99_97,
0.00_02,
0.99_97,
0.99_97,
0.99_69,
0.00_23,
0.99_97,
0.99_69,
0.99_70,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
__lowerCAmelCase = "cpu"
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = self.pipeline_class(**UpperCamelCase )
__lowerCAmelCase = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
__lowerCAmelCase = self.get_dummy_inputs(UpperCamelCase , pil_image=UpperCamelCase )
__lowerCAmelCase = pipe(**UpperCamelCase )
__lowerCAmelCase = output.images
__lowerCAmelCase = self.get_dummy_inputs(UpperCamelCase , pil_image=UpperCamelCase )
__lowerCAmelCase = pipe(
**UpperCamelCase , return_dict=UpperCamelCase , )[0]
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase = np.array([0.99_97, 0.00_03, 0.99_97, 0.99_97, 0.99_70, 0.00_24, 0.99_97, 0.99_71, 0.99_71] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase_ ( self ) -> Optional[Any]:
__lowerCAmelCase = "cpu"
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = self.pipeline_class(**UpperCamelCase )
__lowerCAmelCase = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
__lowerCAmelCase = self.get_dummy_inputs(UpperCamelCase , pil_image=UpperCamelCase )
__lowerCAmelCase = [
pipeline_inputs["image"],
pipeline_inputs["image"],
]
__lowerCAmelCase = pipe(**UpperCamelCase )
__lowerCAmelCase = output.images
__lowerCAmelCase = self.get_dummy_inputs(UpperCamelCase , pil_image=UpperCamelCase )
__lowerCAmelCase = [
tuple_pipeline_inputs["image"],
tuple_pipeline_inputs["image"],
]
__lowerCAmelCase = pipe(
**UpperCamelCase , return_dict=UpperCamelCase , )[0]
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
__lowerCAmelCase = np.array(
[
0.99_97,
0.99_89,
0.00_08,
0.00_21,
0.99_60,
0.00_18,
0.00_14,
0.00_02,
0.99_33,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    def test_unclip_passed_image_embed(self):
        device = torch.device("cpu")

        class DummyScheduler:
            init_noise_sigma = 1

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(0)
        dtype = pipe.decoder.dtype
        batch_size = 1

        shape = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        decoder_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        shape = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        super_res_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        img_out_1 = pipe(
            **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents
        ).images

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        # Don't pass image, instead pass embedding
        image = pipeline_inputs.pop("image")
        image_embeddings = pipe.image_encoder(image).image_embeds

        img_out_2 = pipe(
            **pipeline_inputs,
            decoder_latents=decoder_latents,
            super_res_latents=super_res_latents,
            image_embeddings=image_embeddings,
        ).images

        # make sure passing text embeddings manually is identical
        assert np.abs(img_out_1 - img_out_2).max() < 1e-4
@skip_mps
def UpperCAmelCase_ ( self ) -> str:
__lowerCAmelCase = torch_device == "cpu"
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
__lowerCAmelCase = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=UpperCamelCase , expected_max_diff=UpperCamelCase )
@skip_mps
def UpperCAmelCase_ ( self ) -> Any:
__lowerCAmelCase = torch_device == "cpu"
__lowerCAmelCase = True
__lowerCAmelCase = [
"decoder_num_inference_steps",
"super_res_num_inference_steps",
]
self._test_inference_batch_single_identical(
test_max_difference=UpperCamelCase , relax_max_difference=UpperCamelCase , additional_params_copy_to_batched_inputs=UpperCamelCase , )
def UpperCAmelCase_ ( self ) -> Dict:
__lowerCAmelCase = [
"decoder_num_inference_steps",
"super_res_num_inference_steps",
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
__lowerCAmelCase = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=UpperCamelCase , additional_params_copy_to_batched_inputs=UpperCamelCase , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=UpperCamelCase )
@skip_mps
def UpperCAmelCase_ ( self ) -> List[str]:
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def UpperCAmelCase_ ( self ) -> Tuple:
return super().test_save_load_local()
@skip_mps
def UpperCAmelCase_ ( self ) -> List[Any]:
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
def UpperCAmelCase_ ( self ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self ) -> Optional[Any]:
__lowerCAmelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png" )
__lowerCAmelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/unclip/karlo_v1_alpha_cat_variation_fp16.npy" )
__lowerCAmelCase = UnCLIPImageVariationPipeline.from_pretrained(
"kakaobrain/karlo-v1-alpha-image-variations" , torch_dtype=torch.floataa )
__lowerCAmelCase = pipeline.to(UpperCamelCase )
pipeline.set_progress_bar_config(disable=UpperCamelCase )
__lowerCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 )
__lowerCAmelCase = pipeline(
UpperCamelCase , generator=UpperCamelCase , output_type="np" , )
__lowerCAmelCase = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase , 15 )
| 39
|
'''simple docstring'''
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register a Formatter object using a name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """Register an unavailable Formatter object using a name and optional aliases."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
    _torch_error = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''')
_register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
    _tf_error = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''')
_register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
    _jax_error = ValueError('''JAX needs to be installed to be able to return JAX arrays.''')
_register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a known alias, return its main type name; otherwise return it unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Factory that instantiates the Formatter registered under ``format_type`` (or one of its aliases)."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
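# Hedged usage sketch: aliases resolve to the canonical type name before lookup,
# so "np" and "numpy" are interchangeable, and a backend whose dependency is
# missing raises its registered error instead of a generic KeyError.
#
#   assert get_format_type_from_alias("pt") == "torch"
#   numpy_formatter = get_formatter("np")  # -> NumpyFormatter instance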
| 39
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
"""SEW_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SEWForCTC""",
"""SEWForSequenceClassification""",
"""SEWModel""",
"""SEWPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 104
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_efficientnet""": [
"""EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientNetConfig""",
"""EfficientNetOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
"""EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientNetForImageClassification""",
"""EfficientNetModel""",
"""EfficientNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 266
| 0
|
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""",
    """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""",
}


class XLNetConfig(PretrainedConfig):
    model_type = '''xlnet'''
    keys_to_ignore_at_inference = ['''mems''']
    attribute_map = {
        '''n_token''': '''vocab_size''',  # Backward compatibility
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }
    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"""'d_model % n_head' ({d_model % n_head}) should be equal to 0""")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"""`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"""
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"""The model {self.model_type} is one of the few models that has no sequence length limit."""
        )
| 712
|
"""simple docstring"""
import string
def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher by printing all 26 candidate decryptions."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"""Decryption using Key #{key}: {translated}""")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
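    # Hedged example: feeding the ciphertext "KHOOR" should print "HELLO" on the
    # Key #3 line, since each letter is shifted back by three positions.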
| 14
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _A ( __lowercase , unittest.TestCase ):
__a = UnCLIPImageVariationPipeline
__a = IMAGE_VARIATION_PARAMS - {"""height""", """width""", """guidance_scale"""}
__a = IMAGE_VARIATION_BATCH_PARAMS
__a = [
"""generator""",
"""return_dict""",
"""decoder_num_inference_steps""",
"""super_res_num_inference_steps""",
]
__a = False
@property
def UpperCAmelCase ( self ):
return 32
@property
def UpperCAmelCase ( self ):
return 32
@property
def UpperCAmelCase ( self ):
return self.time_input_dim
@property
def UpperCAmelCase ( self ):
return self.time_input_dim * 4
@property
def UpperCAmelCase ( self ):
return 100
@property
def UpperCAmelCase ( self ):
_UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def UpperCAmelCase ( self ):
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(_SCREAMING_SNAKE_CASE )
@property
def UpperCAmelCase ( self ):
torch.manual_seed(0 )
_UpperCAmelCase = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(_SCREAMING_SNAKE_CASE )
@property
def UpperCAmelCase ( self ):
torch.manual_seed(0 )
_UpperCAmelCase = {
"""clip_embeddings_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""cross_attention_dim""": self.cross_attention_dim,
}
_UpperCAmelCase = UnCLIPTextProjModel(**_SCREAMING_SNAKE_CASE )
return model
@property
def UpperCAmelCase ( self ):
torch.manual_seed(0 )
_UpperCAmelCase = {
"""sample_size""": 32,
# RGB in channels
"""in_channels""": 3,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 6,
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": """identity""",
}
_UpperCAmelCase = UNetaDConditionModel(**_SCREAMING_SNAKE_CASE )
return model
@property
def UpperCAmelCase ( self ):
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def UpperCAmelCase ( self ):
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def UpperCAmelCase ( self ):
# seeded differently to get different unet than `self.dummy_super_res_first`
torch.manual_seed(1 )
_UpperCAmelCase = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def UpperCAmelCase ( self ):
_UpperCAmelCase = self.dummy_decoder
_UpperCAmelCase = self.dummy_text_proj
_UpperCAmelCase = self.dummy_text_encoder
_UpperCAmelCase = self.dummy_tokenizer
_UpperCAmelCase = self.dummy_super_res_first
_UpperCAmelCase = self.dummy_super_res_last
_UpperCAmelCase = UnCLIPScheduler(
variance_type="""learned_range""" , prediction_type="""epsilon""" , num_train_timesteps=1000 , )
_UpperCAmelCase = UnCLIPScheduler(
variance_type="""fixed_small_log""" , prediction_type="""epsilon""" , num_train_timesteps=1000 , )
_UpperCAmelCase = CLIPImageProcessor(crop_size=32 , size=32 )
_UpperCAmelCase = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=True ):
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
if str(_SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
_UpperCAmelCase = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
_UpperCAmelCase = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
if pil_image:
_UpperCAmelCase = input_image * 0.5 + 0.5
_UpperCAmelCase = input_image.clamp(0 , 1 )
_UpperCAmelCase = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
_UpperCAmelCase = DiffusionPipeline.numpy_to_pil(_SCREAMING_SNAKE_CASE )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def UpperCAmelCase ( self ):
_UpperCAmelCase = """cpu"""
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE , pil_image=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipe(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = output.images
_UpperCAmelCase = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE , pil_image=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipe(
**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , )[0]
_UpperCAmelCase = image[0, -3:, -3:, -1]
_UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase = np.array(
[
0.9997,
0.0002,
0.9997,
0.9997,
0.9969,
0.0023,
0.9997,
0.9969,
0.9970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase ( self ):
_UpperCAmelCase = """cpu"""
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE , pil_image=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipe(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = output.images
_UpperCAmelCase = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE , pil_image=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipe(
**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , )[0]
_UpperCAmelCase = image[0, -3:, -3:, -1]
_UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase ( self ):
_UpperCAmelCase = """cpu"""
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE , pil_image=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = [
pipeline_inputs["""image"""],
pipeline_inputs["""image"""],
]
_UpperCAmelCase = pipe(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = output.images
_UpperCAmelCase = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE , pil_image=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = [
tuple_pipeline_inputs["""image"""],
tuple_pipeline_inputs["""image"""],
]
_UpperCAmelCase = pipe(
**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , )[0]
_UpperCAmelCase = image[0, -3:, -3:, -1]
_UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
_UpperCAmelCase = np.array(
[
0.9997,
0.9989,
0.0008,
0.0021,
0.9960,
0.0018,
0.0014,
0.0002,
0.9933,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def test_unclip_passed_image_embed(self):
        device = torch.device("""cpu""")

        class DummyScheduler:
            init_noise_sigma = 1

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(0)
        dtype = pipe.decoder.dtype
        batch_size = 1

        shape = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        decoder_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        shape = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        super_res_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        img_out_1 = pipe(
            **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents
        ).images

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        # Don't pass image, instead pass embedding
        image = pipeline_inputs.pop("""image""")
        image_embeddings = pipe.image_encoder(image).image_embeds

        img_out_2 = pipe(
            **pipeline_inputs,
            decoder_latents=decoder_latents,
            super_res_latents=super_res_latents,
            image_embeddings=image_embeddings,
        ).images

        # make sure passing text embeddings manually is identical
        assert np.abs(img_out_1 - img_out_2).max() < 1e-4
@skip_mps
def UpperCAmelCase ( self ):
_UpperCAmelCase = torch_device == """cpu"""
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
_UpperCAmelCase = 1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=_SCREAMING_SNAKE_CASE , expected_max_diff=_SCREAMING_SNAKE_CASE )
@skip_mps
def UpperCAmelCase ( self ):
_UpperCAmelCase = torch_device == """cpu"""
_UpperCAmelCase = True
_UpperCAmelCase = [
"""decoder_num_inference_steps""",
"""super_res_num_inference_steps""",
]
self._test_inference_batch_single_identical(
test_max_difference=_SCREAMING_SNAKE_CASE , relax_max_difference=_SCREAMING_SNAKE_CASE , additional_params_copy_to_batched_inputs=_SCREAMING_SNAKE_CASE , )
def UpperCAmelCase ( self ):
additional_params_copy_to_batched_inputs = [
"decoder_num_inference_steps",
"super_res_num_inference_steps",
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
batch_sizes = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=batch_sizes , additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs )
@skip_mps
def UpperCAmelCase ( self ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def UpperCAmelCase ( self ):
return super().test_save_load_local()
@skip_mps
def UpperCAmelCase ( self ):
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self ):
input_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png" )
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/unclip/karlo_v1_alpha_cat_variation_fp16.npy" )
pipeline = UnCLIPImageVariationPipeline.from_pretrained(
"kakaobrain/karlo-v1-alpha-image-variations" , torch_dtype=torch.float16 )
pipeline = pipeline.to(torch_device )
pipeline.set_progress_bar_config(disable=None )
generator = torch.Generator(device="cpu" ).manual_seed(0 )
output = pipeline(
input_image , generator=generator , output_type="np" , )
image = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(image , expected_image , 15 )
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
SAMPLE_PROCESSOR_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json")
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("fixtures")
class _A ( unittest.TestCase ):
vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def UpperCAmelCase ( self ):
_UpperCAmelCase = 0
def UpperCAmelCase ( self ):
processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(processor , WavaVecaProcessor )
def UpperCAmelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
model_config = WavaVecaConfig()
processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
# save in new folder
model_config.save_pretrained(tmpdirname )
processor.save_pretrained(tmpdirname )
processor = AutoProcessor.from_pretrained(tmpdirname )
self.assertIsInstance(processor , WavaVecaProcessor )
def UpperCAmelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(SAMPLE_PROCESSOR_CONFIG , os.path.join(tmpdirname , FEATURE_EXTRACTOR_NAME ) )
copyfile(SAMPLE_VOCAB , os.path.join(tmpdirname , "vocab.json" ) )
processor = AutoProcessor.from_pretrained(tmpdirname )
self.assertIsInstance(processor , WavaVecaProcessor )
def UpperCAmelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
feature_extractor = WavaVecaFeatureExtractor()
tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
processor = WavaVecaProcessor(feature_extractor , tokenizer )
# save in new folder
processor.save_pretrained(tmpdirname )
# drop `processor_class` in tokenizer
with open(os.path.join(tmpdirname , TOKENIZER_CONFIG_FILE ) , "r" ) as f:
config_dict = json.load(f )
config_dict.pop("processor_class" )
with open(os.path.join(tmpdirname , TOKENIZER_CONFIG_FILE ) , "w" ) as f:
f.write(json.dumps(config_dict ) )
processor = AutoProcessor.from_pretrained(tmpdirname )
self.assertIsInstance(processor , WavaVecaProcessor )
def UpperCAmelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
feature_extractor = WavaVecaFeatureExtractor()
tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
processor = WavaVecaProcessor(feature_extractor , tokenizer )
# save in new folder
processor.save_pretrained(tmpdirname )
# drop `processor_class` in feature extractor
with open(os.path.join(tmpdirname , FEATURE_EXTRACTOR_NAME ) , "r" ) as f:
config_dict = json.load(f )
config_dict.pop("processor_class" )
with open(os.path.join(tmpdirname , FEATURE_EXTRACTOR_NAME ) , "w" ) as f:
f.write(json.dumps(config_dict ) )
processor = AutoProcessor.from_pretrained(tmpdirname )
self.assertIsInstance(processor , WavaVecaProcessor )
def UpperCAmelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
model_config = WavaVecaConfig(processor_class="Wav2Vec2Processor" )
model_config.save_pretrained(tmpdirname )
# copy relevant files
copyfile(SAMPLE_VOCAB , os.path.join(tmpdirname , "vocab.json" ) )
# create empty sample processor
with open(os.path.join(tmpdirname , FEATURE_EXTRACTOR_NAME ) , "w" ) as f:
f.write("{}" )
processor = AutoProcessor.from_pretrained(tmpdirname )
self.assertIsInstance(processor , WavaVecaProcessor )
def UpperCAmelCase ( self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(ValueError ):
processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(ValueError ):
processor = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=False )
processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" , trust_remote_code=True )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
feature_extractor = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
tokenizer = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
new_processor = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=True , use_fast=False )
new_tokenizer = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , "NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
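# The three loads above exercise the `trust_remote_code` contract: leaving it unset times out
# while asking for confirmation, False refuses the remote classes, and True pulls the custom
# processor/tokenizer/feature-extractor classes from the Hub repo.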
def UpperCAmelCase ( self ):
try:
AutoConfig.register("custom" , CustomConfig )
AutoFeatureExtractor.register(CustomConfig , CustomFeatureExtractor )
AutoTokenizer.register(CustomConfig , slow_tokenizer_class=CustomTokenizer )
AutoProcessor.register(CustomConfig , CustomProcessor )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(ValueError ):
AutoProcessor.register(WavaVecaConfig , WavaVecaProcessor )
# Now that the config is registered, it can be used as any other config with the auto-API
feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR )
with tempfile.TemporaryDirectory() as tmp_dir:
vocab_file = os.path.join(tmp_dir , "vocab.txt" )
with open(vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
tokenizer = CustomTokenizer(vocab_file )
processor = CustomProcessor(feature_extractor , tokenizer )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(tmp_dir )
new_processor = AutoProcessor.from_pretrained(tmp_dir )
self.assertIsInstance(new_processor , CustomProcessor )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase ( self ):
class NewFeatureExtractor(WavaVecaFeatureExtractor ):
special_attribute_present = False
class NewTokenizer(BertTokenizer ):
special_attribute_present = False
class NewProcessor(ProcessorMixin ):
feature_extractor_class = "AutoFeatureExtractor"
tokenizer_class = "AutoTokenizer"
special_attribute_present = False
try:
AutoConfig.register("custom" , CustomConfig )
AutoFeatureExtractor.register(CustomConfig , NewFeatureExtractor )
AutoTokenizer.register(CustomConfig , slow_tokenizer_class=NewTokenizer )
AutoProcessor.register(CustomConfig , NewProcessor )
# If remote code is not set, the default is to use local classes.
processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
processor = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=False )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
processor = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=True )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase ( self ):
processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(processor.__class__.__name__ , "BertTokenizerFast" )
def UpperCAmelCase ( self ):
processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext" )
self.assertEqual(processor.__class__.__name__ , "ConvNextImageProcessor" )
@is_staging_test
class _A ( unittest.TestCase ):
vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def setUpClass( cls ):
cls._token = TOKEN
HfFolder.save_token(TOKEN )
@classmethod
def tearDownClass( cls ):
try:
delete_repo(token=cls._token , repo_id="""test-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" )
except HTTPError:
pass
def UpperCAmelCase ( self ):
processor = WavaVecaProcessor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(tmp_dir , "test-processor" ) , push_to_hub=True , use_auth_token=self._token )
new_processor = WavaVecaProcessor.from_pretrained(F"{USER}/test-processor" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(v , getattr(new_processor.feature_extractor , k ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def UpperCAmelCase ( self ):
processor = WavaVecaProcessor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(tmp_dir , "test-processor-org" ) , push_to_hub=True , use_auth_token=self._token , organization="valid_org" , )
new_processor = WavaVecaProcessor.from_pretrained("valid_org/test-processor-org" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(v , getattr(new_processor.feature_extractor , k ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def UpperCAmelCase ( self ):
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR )
with tempfile.TemporaryDirectory() as tmp_dir:
vocab_file = os.path.join(tmp_dir , "vocab.txt" )
with open(vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
tokenizer = CustomTokenizer(vocab_file )
processor = CustomProcessor(feature_extractor , tokenizer )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F"{USER}/test-dynamic-processor" , token=self._token )
repo = Repository(tmp_dir , clone_from=F"{USER}/test-dynamic-processor" , token=self._token )
processor.save_pretrained(tmp_dir )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""",
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(tmp_dir , "tokenizer_config.json" ) ) as f:
tokenizer_config = json.load(f )
self.assertDictEqual(
tokenizer_config["""auto_map"""] , {
"""AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None],
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(tmp_dir , "custom_feature_extraction.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(tmp_dir , "custom_tokenization.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(tmp_dir , "custom_processing.py" ) ) )
repo.push_to_hub()
_UpperCAmelCase = AutoProcessor.from_pretrained(F"{USER}/test-dynamic-processor" , trust_remote_code=_SCREAMING_SNAKE_CASE )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , """CustomProcessor""" )
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"""microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"""
),
"""microsoft/deberta-v2-xxlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"""
),
}
class DebertaVaConfig(PretrainedConfig ):
'''simple docstring'''
model_type = "deberta-v2"
def __init__( self , vocab_size=128100 , hidden_size=1536 , num_hidden_layers=24 , num_attention_heads=24 , intermediate_size=6144 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=0 , initializer_range=0.02 , layer_norm_eps=1E-7 , relative_attention=False , max_relative_positions=-1 , pad_token_id=0 , position_biased_input=True , pos_att_type=None , pooler_dropout=0 , pooler_hidden_act="gelu" , **kwargs , ):
super().__init__(**kwargs )
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.relative_attention = relative_attention
self.max_relative_positions = max_relative_positions
self.pad_token_id = pad_token_id
self.position_biased_input = position_biased_input
# Backwards compatibility
if type(pos_att_type ) == str:
pos_att_type = [x.strip() for x in pos_att_type.lower().split('|' )]
self.pos_att_type = pos_att_type
self.vocab_size = vocab_size
self.layer_norm_eps = layer_norm_eps
self.pooler_hidden_size = kwargs.get('pooler_hidden_size' , hidden_size )
self.pooler_dropout = pooler_dropout
self.pooler_hidden_act = pooler_hidden_act
class DebertaVaOnnxConfig(OnnxConfig ):
'''simple docstring'''
@property
def inputs( self ):
if self.task == "multiple-choice":
dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
dynamic_axis = {0: 'batch', 1: 'sequence'}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] )
else:
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] )
@property
def default_onnx_opset( self ):
return 12
def generate_dummy_inputs( self , preprocessor , batch_size = -1 , seq_length = -1 , num_choices = -1 , is_pair = False , framework = None , num_channels = 3 , image_width = 40 , image_height = 40 , tokenizer = None , ):
dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor , framework=framework )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"""google/umt5-small""": """https://huggingface.co/google/umt5-small/resolve/main/config.json""",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMTaConfig(PretrainedConfig ):
'''simple docstring'''
model_type = "umt5"
keys_to_ignore_at_inference = ["past_key_values"]
def __init__( self , vocab_size=250112 , d_model=512 , d_kv=64 , d_ff=1024 , num_layers=8 , num_decoder_layers=None , num_heads=6 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1E-6 , initializer_factor=1.0 , feed_forward_proj="gated-gelu" , is_encoder_decoder=True , use_cache=True , tokenizer_class="T5Tokenizer" , tie_word_embeddings=True , pad_token_id=0 , eos_token_id=1 , decoder_start_token_id=0 , **kwargs , ):
super().__init__(
is_encoder_decoder=is_encoder_decoder , tokenizer_class=tokenizer_class , tie_word_embeddings=tie_word_embeddings , pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
self.vocab_size = vocab_size
self.d_model = d_model
self.d_kv = d_kv
self.d_ff = d_ff
self.num_layers = num_layers
self.num_decoder_layers = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
self.num_heads = num_heads
self.relative_attention_num_buckets = relative_attention_num_buckets
self.relative_attention_max_distance = relative_attention_max_distance
self.dropout_rate = dropout_rate
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_factor = initializer_factor
self.feed_forward_proj = feed_forward_proj
self.use_cache = use_cache
act_info = self.feed_forward_proj.split('-' )
self.dense_act_fn = act_info[-1]
self.is_gated_act = act_info[0] == 'gated'
if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
raise ValueError(
F'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
if feed_forward_proj == "gated-gelu":
self.dense_act_fn = 'gelu_new'
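# Note: `gated-gelu` is mapped to the `gelu_new` activation above for backwards
# compatibility with checkpoints that predate the split activation-name format.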
@property
def hidden_size( self ):
return self.d_model
@property
def num_attention_heads( self ):
return self.num_heads
@property
def num_hidden_layers( self ):
return self.num_layers
class UMTaOnnxConfig(OnnxSeqaSeqConfigWithPast ):
'''simple docstring'''
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def inputs( self ):
common_inputs = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
common_inputs['attention_mask'][1] = 'past_encoder_sequence + sequence'
common_inputs['decoder_input_ids'] = {0: 'batch'}
common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(common_inputs , direction='inputs' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def default_onnx_opset( self ):
return 13
@property
def atol_for_validation( self ):
return 5E-4
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
lowercase__ =logging.get_logger(__name__)
lowercase__ ={
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class BloomConfig(PretrainedConfig ):
model_type = "bloom"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {
"num_hidden_layers": "n_layer",
"num_attention_heads": "n_head",
}
def __init__(self , vocab_size=250880 , hidden_size=64 , n_layer=2 , n_head=8 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , use_cache=True , bos_token_id=1 , eos_token_id=2 , apply_residual_connection_post_layernorm=False , hidden_dropout=0.0 , attention_dropout=0.0 , pretraining_tp=1 , slow_but_exact=False , **kwargs , ):
self.vocab_size = vocab_size
# Backward compatibility with n_embed kwarg
n_embed = kwargs.pop("n_embed" , None )
self.hidden_size = hidden_size if n_embed is None else n_embed
self.n_layer = n_layer
self.n_head = n_head
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.use_cache = use_cache
self.pretraining_tp = pretraining_tp
self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
self.hidden_dropout = hidden_dropout
self.attention_dropout = attention_dropout
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
self.slow_but_exact = slow_but_exact
super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
class BloomOnnxConfig(OnnxConfigWithPast ):
torch_onnx_minimum_version = version.parse("1.12" )
def __init__(self , config: PretrainedConfig , task: str = "default" , patching_specs: List[PatchingSpec] = None , use_past: bool = False , ):
super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
if not getattr(self._config , "pad_token_id" , None ):
# TODO: how to do that better?
self._config.pad_token_id = 0
@property
def inputs(self ):
common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(common_inputs , direction="inputs" , inverted_values_shape=True )
common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
else:
common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
return common_inputs
@property
def num_layers(self ):
return self._config.n_layer
@property
def num_attention_heads(self ):
return self._config.n_head
@property
def atol_for_validation(self ):
return 1E-3
def generate_dummy_inputs(self , tokenizer: "PreTrainedTokenizer" , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional["TensorType"] = None , ):
common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
# We need to order the input in the way they appears in the forward()
ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
batch , seqlen = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
past_key_values_length = seqlen + 2
head_dim = self._config.hidden_size // self.num_attention_heads
past_key_shape = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
past_value_shape = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
ordered_inputs["past_key_values"] = [
(torch.zeros(past_key_shape ), torch.zeros(past_value_shape )) for _ in range(self.num_layers )
]
ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
if self.use_past:
mask_dtype = ordered_inputs["attention_mask"].dtype
ordered_inputs["attention_mask"] = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
return ordered_inputs
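# Note on the dummy shapes above: the past length is deliberately `seqlen + 2` rather than
# `seqlen`, so that any mix-up between the past and current sequence axes surfaces during export.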
@property
def default_onnx_opset(self ):
return 13
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowercase__ =logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ImageClassificationPipeline(Pipeline ):
def __init__(self : Optional[int] , *snake_case_ : Any , **snake_case_ : Dict ):
super().__init__(*snake_case_ , **snake_case_ )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def _sanitize_parameters(self , top_k=None ):
postprocess_params = {}
if top_k is not None:
postprocess_params["top_k"] = top_k
return {}, {}, postprocess_params
def __call__(self , images: Union[str, List[str], "Image.Image", List["Image.Image"]] , **kwargs ):
return super().__call__(images , **kwargs )
def preprocess(self , image ):
image = load_image(image )
model_inputs = self.image_processor(images=image , return_tensors=self.framework )
return model_inputs
def _forward(self , model_inputs ):
model_outputs = self.model(**model_inputs )
return model_outputs
def postprocess(self , model_outputs , top_k=5 ):
if top_k > self.model.config.num_labels:
top_k = self.model.config.num_labels
if self.framework == "pt":
probs = model_outputs.logits.softmax(-1 )[0]
scores , ids = probs.topk(top_k )
elif self.framework == "tf":
probs = stable_softmax(model_outputs.logits , axis=-1 )[0]
topk = tf.math.top_k(probs , k=top_k )
scores , ids = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(f"Unsupported framework: {self.framework}" )
scores = scores.tolist()
ids = ids.tolist()
return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
from math import factorial, radians
def sin( angle_in_degrees: float , accuracy: int = 18 , rounded_values_count: int = 10 ) -> float:
# Wrap the angle into the range (-360, 360) degrees
angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
angle_in_radians = radians(angle_in_degrees )
result = angle_in_radians
a = 3
b = -1
for _ in range(accuracy ):
result += (b * (angle_in_radians**a)) / factorial(a )
b = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(result , rounded_values_count )
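# Quick sanity check against the standard library (assumes the implementation above):
# import math
# abs(sin(45) - round(math.sin(math.radians(45)), 10)) # ~0 with the default 18 series terms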
if __name__ == "__main__":
__import__('doctest').testmod()
import math
def is_prime( number: int ) -> bool:
assert isinstance(number , int ) and (
number >= 0
), "'number' must be an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
odd_numbers = range(3 , int(math.sqrt(number ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def next_prime( value , factor=1 , **kwargs ):
value = factor * value
first_value_val = value
while not is_prime(value ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **kwargs )
return value
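# Example behaviour of the helpers above: is_prime(13) -> True, is_prime(14) -> False;
# next_prime(14) walks upward to 17, and next_prime(13) also returns 17, because a prime
# input triggers the recursive call on value + 1.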
from math import factorial
def solution( n: int = 20 ) -> int:
n = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
k = n // 2
return int(factorial(n ) / (factorial(k ) * factorial(n - k )) )
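# The result is the central binomial coefficient C(2n, n): a lattice path through an
# n x n grid consists of 2n steps, of which exactly n must go in each direction.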
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number.")
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module ):
'''simple docstring'''
def __init__( self , args ):
super().__init__()
model = torchvision.models.resnet152(pretrained=True )
modules = list(model.children() )[:-2]
self.model = nn.Sequential(*modules )
self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds] )
def forward( self , x ):
# Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
out = self.pool(self.model(x ) )
out = torch.flatten(out , start_dim=2 )
out = out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
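# POOLING_BREAKDOWN (defined above) maps the requested number of image embeddings to an
# (h, w) pooling grid: e.g. num_image_embeds = 4 pools the 7x7 feature map down to 2x2,
# yielding four 2048-d region features per image.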
class JsonlDataset(Dataset ):
'''simple docstring'''
def __init__( self , data_path , tokenizer , transforms , labels , max_seq_length ):
self.data = [json.loads(l ) for l in open(data_path )]
self.data_dir = os.path.dirname(data_path )
self.tokenizer = tokenizer
self.labels = labels
self.n_classes = len(labels )
self.max_seq_length = max_seq_length
self.transforms = transforms
def __len__( self ):
return len(self.data )
def __getitem__( self , index ):
sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"] , add_special_tokens=True ) )
start_token , sentence , end_token = sentence[0], sentence[1:-1], sentence[-1]
sentence = sentence[: self.max_seq_length]
label = torch.zeros(self.n_classes )
label[[self.labels.index(tgt ) for tgt in self.data[index]["label"]]] = 1
image = Image.open(os.path.join(self.data_dir , self.data[index]["img"] ) ).convert("RGB" )
image = self.transforms(image )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def get_label_frequencies( self ):
label_freqs = Counter()
for row in self.data:
label_freqs.update(row["label"] )
return label_freqs
def collate_fn( batch ):
lens = [len(row["sentence"] ) for row in batch]
bsz , max_seq_len = len(batch ), max(lens )
mask_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long )
text_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(batch , lens ) ):
text_tensor[i_batch, :length] = input_row["sentence"]
mask_tensor[i_batch, :length] = 1
img_tensor = torch.stack([row["image"] for row in batch] )
tgt_tensor = torch.stack([row["label"] for row in batch] )
img_start_token = torch.stack([row["image_start_token"] for row in batch] )
img_end_token = torch.stack([row["image_end_token"] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
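# Sentences are padded up to the longest example in the batch; mask_tensor marks the real
# token positions so the model can ignore the padding during attention.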
def get_mmimdb_labels():
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ),
] )
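# Note: the normalization statistics above are the ones shipped with this example and differ
# from the usual ImageNet constants (0.485/0.456/0.406 mean, 0.229/0.224/0.225 std).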
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
def __init__( self , short_edge_length , max_size=sys.maxsize ):
self.interp_method = "bilinear"
self.max_size = max_size
self.short_edge_length = short_edge_length
def __call__( self , imgs ):
img_augs = []
for img in imgs:
h , w = img.shape[:2]
# later: provide list and randomly choose index for resize
size = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
scale = size * 1.0 / min(h , w )
if h < w:
newh , neww = size, scale * w
else:
newh , neww = scale * h, size
if max(newh , neww ) > self.max_size:
scale = self.max_size * 1.0 / max(newh , neww )
newh = newh * scale
neww = neww * scale
neww = int(neww + 0.5 )
newh = int(newh + 0.5 )
if img.dtype == np.uint8:
pil_image = Image.fromarray(img )
pil_image = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
img = np.asarray(pil_image )
else:
img = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # hw(c) -> nchw
img = nn.functional.interpolate(
img , (newh, neww) , mode=self.interp_method , align_corners=False ).squeeze(0 )
img_augs.append(img )
return img_augs
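# The shortest image edge is scaled to the sampled `size` while preserving aspect ratio;
# if the longer edge would then exceed `max_size`, both edges are rescaled a second time.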
class Preprocess:
def __init__( self , cfg ):
self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
self.input_format = cfg.INPUT.FORMAT
self.size_divisibility = cfg.SIZE_DIVISIBILITY
self.pad_value = cfg.PAD_VALUE
self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
self.device = cfg.MODEL.DEVICE
self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std
def pad( self , images ):
max_size = tuple(max(s ) for s in zip(*[img.shape for img in images] ) )
image_sizes = [im.shape[-2:] for im in images]
images = [
nn.functional.pad(
im , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(image_sizes , images )
]
return torch.stack(images ), torch.tensor(image_sizes )
def __call__( self , images , single_image=False ):
with torch.no_grad():
if not isinstance(images , list ):
images = [images]
if single_image:
assert len(images ) == 1
for i in range(len(images ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(i , images.pop(i ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
i , torch.as_tensor(img_tensorize(images.pop(i ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
raw_sizes = torch.tensor([im.shape[:2] for im in images] )
images = self.aug(images )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
images = [self.normalizer(x ) for x in images]
# now pad them to do the following operations
images , sizes = self.pad(images )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
scales_yx = torch.true_divide(raw_sizes , sizes )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def _scale_box( boxes , scale_yx ):
'''simple docstring'''
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def _clip_box( tensor , box_size: Tuple[int, int] ):
'''simple docstring'''
assert torch.isfinite(tensor ).all(), "Box tensor contains infinite or NaN!"
h , w = box_size
tensor[:, 0].clamp_(min=0 , max=w )
tensor[:, 1].clamp_(min=0 , max=h )
tensor[:, 2].clamp_(min=0 , max=w )
tensor[:, 3].clamp_(min=0 , max=h )
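# _clip_box clamps in place: columns 0 and 2 hold x coordinates (clamped to the width w),
# while columns 1 and 3 hold y coordinates (clamped to the height h).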
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __UpperCamelCase ( unittest.TestCase ):
def __a ( self ):
filenames = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(filenames ) )
def __a ( self ):
filenames = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(filenames ) )
def __a ( self ):
filenames = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(filenames ) )
def __a ( self ):
filenames = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
self.assertTrue(is_safetensors_compatible(filenames ) )
def __a ( self ):
filenames = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
# Removed: 'text_encoder/model.safetensors',
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertFalse(is_safetensors_compatible(filenames ) )
def __a ( self ):
filenames = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
variant = "fp16"
self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
def __a ( self ):
filenames = [
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
variant = "fp16"
self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
def __a ( self ):
# pass variant but use the non-variant filenames
filenames = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
variant = "fp16"
self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
def __a ( self ):
filenames = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
variant = "fp16"
self.assertFalse(is_safetensors_compatible(filenames , variant=variant ) )
def __a ( self ):
filenames = [
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
]
variant = "fp16"
self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
def __a ( self ):
# pass variant but use the non-variant filenames
filenames = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
variant = "fp16"
self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
def __a ( self ):
filenames = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
# 'text_encoder/model.fp16.safetensors',
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
variant = "fp16"
self.assertFalse(is_safetensors_compatible(filenames , variant=variant ) )
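# The contract exercised above: a repo is safetensors-compatible only when every `.bin`
# weight file has a `.safetensors` counterpart, optionally carrying a variant infix such
# as `.fp16` in its filename.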
"""simple docstring"""
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
"word_embeddings_layernorm.weight",
"word_embeddings_layernorm.bias",
"input_layernorm.weight",
"input_layernorm.bias",
"post_attention_layernorm.weight",
"post_attention_layernorm.bias",
"self_attention.dense.bias",
"mlp.dense_4h_to_h.bias",
"ln_f.weight",
"ln_f.bias",
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
"mlp.dense_4h_to_h.weight",
"self_attention.dense.weight",
]
def layer_name_mapping( key , file ):
"""simple docstring"""
layer_rename_map = {
"word_embeddings.weight": "word_embeddings.weight",
"word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
"word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
"weight": "ln_f.weight",
"bias": "ln_f.bias",
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
layer_number = int(re.match(R".*layer_(\d*).*" , file )[1] )
layer_number -= 3
return F'''h.{layer_number}.''' + key
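# The "- 3" offset appears to account for the embedding and pre-transformer entries that
# occupy the first layer slots in Megatron-DeepSpeed checkpoints, so transformer block
# indices in the converted layout start at 0.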
def get_dtype_size( dtype ):
"""simple docstring"""
if dtype == torch.bool:
return 1 / 8
bit_search = re.search(R"[^\d](\d+)$" , str(dtype ) )
if bit_search is None:
raise ValueError(F'''`dtype` is not a valid dtype: {dtype}.''' )
bit_size = int(bit_search.groups()[0] )
return bit_size // 8
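# Example: get_dtype_size(torch.float16) parses the trailing "16" from "torch.float16",
# giving 16 bits // 8 = 2 bytes per element.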
def lowercase ( UpperCamelCase : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict , UpperCamelCase : List[str] , UpperCamelCase : List[str] ):
"""simple docstring"""
# Construct model
if bloom_config_file == "":
A__ : Any =BloomConfig()
else:
A__ : List[str] =BloomConfig.from_json_file(UpperCamelCase )
if shard_model:
A__ : str =os.listdir(UpperCamelCase )
A__ : str =sorted(filter(lambda UpperCamelCase : s.startswith("layer" ) and "model_00" in s , UpperCamelCase ) )
A__ : Union[str, Any] ={"weight_map": {}, "metadata": {}}
A__ : Tuple =0
A__ : Any =None
A__ : Optional[int] =BloomConfig()
for j, file in enumerate(UpperCamelCase ):
print("Processing file: {}".format(UpperCamelCase ) )
A__ : Union[str, Any] =None
for i in range(UpperCamelCase ):
# load all TP files
A__ : Optional[Any] =file.replace("model_00" , F'''model_0{i}''' )
A__ : Optional[Any] =torch.load(os.path.join(UpperCamelCase , UpperCamelCase ) , map_location="cpu" )
# Rename keys in the transformers names
A__ : Tuple =list(temp.keys() )
for key in keys:
A__ : Dict =temp.pop(UpperCamelCase )
if tensors is None:
A__ : Tuple =temp
else:
for key in tensors.keys():
if any(key.endswith(UpperCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
A__ : List[Any] =1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights across TP ranks
A__ : Tuple =torch.cat([tensors[key], temp[key]] , dim=UpperCamelCase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(UpperCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
A__ : List[str] =tensors[key] / pretraining_tp
torch.save(
UpperCamelCase , os.path.join(
UpperCamelCase , "pytorch_model_{}-of-{}.bin".format(str(j + 1 ).zfill(5 ) , str(len(UpperCamelCase ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
A__ : Any =tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
A__ : str ="pytorch_model_{}-of-{}.bin".format(
str(j + 1 ).zfill(5 ) , str(len(UpperCamelCase ) ).zfill(5 ) )
A__ : Any =BloomConfig()
A__ : Any =pytorch_dump_folder_path + "/" + CONFIG_NAME
A__ : Optional[int] =total_size
with open(UpperCamelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
with open(os.path.join(UpperCamelCase , WEIGHTS_NAME + ".index.json" ) , "w" , encoding="utf-8" ) as f:
A__ : int =json.dumps(UpperCamelCase , indent=2 , sort_keys=UpperCamelCase ) + "\n"
f.write(UpperCamelCase )
else:
A__ : str =BloomModel(UpperCamelCase )
A__ : Tuple =os.listdir(UpperCamelCase )
A__ : List[str] =sorted(filter(lambda UpperCamelCase : s.startswith("layer" ) and "model_00" in s , UpperCamelCase ) )
A__ : Tuple =None
for i, file in enumerate(UpperCamelCase ):
A__ : str =None
for i in range(UpperCamelCase ):
# load all TP files
A__ : List[Any] =file.replace("model_00" , F'''model_0{i}''' )
A__ : int =torch.load(os.path.join(UpperCamelCase , UpperCamelCase ) , map_location="cpu" )
# Rename keys in the transformers names
A__ : Optional[Any] =list(temp.keys() )
for key in keys:
A__ : int =temp.pop(UpperCamelCase )
if tensors is None:
A__ : Tuple =temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(UpperCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
A__ : List[Any] =1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights across TP ranks
A__ : str =torch.cat([tensors[key], temp[key]] , dim=UpperCamelCase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(UpperCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
A__ : Optional[int] =tensors[key] / pretraining_tp
A__ : Optional[Any] =model.load_state_dict(UpperCamelCase , strict=UpperCamelCase )
assert not other_keys.unexpected_keys, F'''The keys {other_keys.unexpected_keys} are unexpected'''
if missing_keys is None:
A__ : Optional[int] =set(other_keys.missing_keys )
else:
A__ : Optional[int] =missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, F'''The keys {missing_keys} are missing'''
# Save pytorch-model
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
A__ : Tuple =pytorch_dump_folder_path + "/" + WEIGHTS_NAME
A__ : Any =pytorch_dump_folder_path + "/" + CONFIG_NAME
print(F'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' )
if config.torch_dtype is not None:
A__ : Union[str, Any] =model.to(config.torch_dtype )
torch.save(model.state_dict() , UpperCamelCase )
print(F'''Save configuration file to {pytorch_config_dump_path}''' )
with open(UpperCamelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__A : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bloom_checkpoint_path",
default=None,
type=str,
required=True,
help="Path to the Megatron-LM checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--bloom_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--shard_model",
action="store_true",
help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
)
parser.add_argument(
"--pretraining_tp",
default=4,
type=int,
help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
)
__A : Tuple = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}
class CpmTokenizer(PreTrainedTokenizer ):
'''simple docstring'''
def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ):
mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
self._pad_token_type_id = 3
self.do_lower_case = do_lower_case
self.remove_space = remove_space
self.keep_accents = keep_accents
self.vocab_file = vocab_file
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(vocab_file )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
"See https://pypi.org/project/jieba/ for installation." )
self.jieba = jieba
self.translator = str.maketrans(" \n" , "\u2582\u2583" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def vocab_size( self ):
return len(self.sp_model )
def get_vocab( self ):
vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
state = self.__dict__.copy()
state["sp_model"] = None
return state
def __setstate__( self , d ):
self.__dict__ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs
    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            # re-split tokens like "2017," so the digits and the comma are separated
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
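# Minimal usage sketch for the tokenizer above -- assumes a pretrained vocab such as
# "TsinghuaAI/CPM-Generate" is available locally or on the Hub:
#
#   tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
#   ids = tokenizer("你好,世界")["input_ids"]   # jieba pre-segmentation, then SentencePiece
#   text = tokenizer.decode(ids)                 # _decode restores the spaces and newlines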
| 656
| 1
|
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Heun's method (modified Euler): take an Euler predictor step, then
    correct with the trapezoidal average of the slopes at both interval ends."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])  # Euler predictor
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )  # trapezoidal corrector
        x += step_size

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
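    # Quick sanity check: solve y' = y with y(0) = 1 on [0, 1]; the exact value is e.
    # y = euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    # print(y[-1])  # ~2.718, close to math.e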
| 718
|
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    """Smallest index in v[l..r] whose value is >= key (v must be sorted)."""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    """O(n log n) LIS length via the patience-sorting `tail` array."""
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            tail[0] = v[i]  # new smallest value starts a fresh tail
        elif v[i] > tail[length - 1]:
            tail[length] = v[i]  # extend the longest subsequence found so far
            length += 1
        else:
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
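    # Worked example: in [2, 5, 3, 7, 11, 8, 10, 13, 6] one longest increasing
    # subsequence is [2, 3, 7, 8, 10, 13], so:
    # print(longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]))  # 6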
| 588
| 0
|
import math
def sieve(n: int) -> list[int]:
    """Segmented sieve: sieve the first segment [2, sqrt(n)] directly, then
    mark multiples of those primes inside sqrt(n)-sized windows up to n."""
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # size of every segment
    temp = [True] * (end + 1)
    prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each  # largest multiple of `each` not above low
            if t < low:
                t += each

            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(1_0**6))
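# Small worked example: sieve(30) -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]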
| 291
|
import argparse
import json
import os

import torch
from transformers.file_utils import has_file

from diffusers import UNet2DConditionModel, UNet2DModel


do_only_config = False
do_only_weights = True
do_only_renaming = False


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    # prefix swap: replace the old top-level module name with the new one
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
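# Hedged invocation sketch -- the script name and paths below are illustrative placeholders:
#
#   python rename_unet_configs_and_weights.py --repo_path ./old-unet-repo --dump_path ./new-unet-repo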
| 291
| 1
|
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path: str, config_path: str, output_path: str):
    """Split an original latent-diffusion checkpoint into VQ-VAE and UNet weights
    and repackage them as a diffusers LDMPipeline."""
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", type=str, required=True)
parser.add_argument("--config_path", type=str, required=True)
parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
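    # Hedged invocation sketch -- all three paths are placeholders:
    #
    #   python conversion_ldm_uncond.py --checkpoint_path model.ckpt \
    #       --config_path config.yaml --output_path ./ldm-pipeline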
| 673
|
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """Create all character-level n-grams of length `ngram_size` from `sentence`.

    >>> create_ngram("I am a sentence", 2)[:3]
    ['I ', ' a', 'am']
    """
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 673
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""moussaKam/mbarthez""": 1024,
"""moussaKam/barthez""": 1024,
"""moussaKam/barthez-orangesum-title""": 1024,
}
UpperCAmelCase = """▁"""
class lowercase__ ( A_ ):
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase = ['''input_ids''', '''attention_mask''']
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="<s>" , SCREAMING_SNAKE_CASE="</s>" , SCREAMING_SNAKE_CASE="</s>" , SCREAMING_SNAKE_CASE="<s>" , SCREAMING_SNAKE_CASE="<unk>" , SCREAMING_SNAKE_CASE="<pad>" , SCREAMING_SNAKE_CASE="<mask>" , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
_lowerCamelCase : List[str] = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) else mask_token
_lowerCamelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE , )
_lowerCamelCase : Optional[int] = vocab_file
_lowerCamelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(SCREAMING_SNAKE_CASE))
_lowerCamelCase : Union[str, Any] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
_lowerCamelCase : Optional[int] = len(self.sp_model) - 1
_lowerCamelCase : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCamelCase : Dict = [self.cls_token_id]
_lowerCamelCase : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE , token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE)
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE)) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE)) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE)) + [1]
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None) -> List[int]:
_lowerCamelCase : Union[str, Any] = [self.sep_token_id]
_lowerCamelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def UpperCamelCase_ ( self) -> Union[str, Any]:
return len(self.sp_model)
def UpperCamelCase_ ( self) -> Dict:
_lowerCamelCase : Tuple = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> List[str]:
return self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> str:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_lowerCamelCase : Optional[Any] = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE)
return spm_id if spm_id else self.unk_token_id
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> List[str]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> str:
_lowerCamelCase : Any = []
_lowerCamelCase : List[str] = """"""
_lowerCamelCase : Optional[int] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE) + token
_lowerCamelCase : Any = True
_lowerCamelCase : Union[str, Any] = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE)
_lowerCamelCase : str = False
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE)
return out_string.strip()
def __getstate__( self) -> int:
_lowerCamelCase : List[str] = self.__dict__.copy()
_lowerCamelCase : int = None
return state
def __setstate__( self , SCREAMING_SNAKE_CASE) -> Dict:
_lowerCamelCase : Optional[Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs"""):
_lowerCamelCase : Any = {}
_lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None) -> Tuple[str]:
if not os.path.isdir(SCREAMING_SNAKE_CASE):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
_lowerCamelCase : Tuple = os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
if os.path.abspath(self.vocab_file) != os.path.abspath(SCREAMING_SNAKE_CASE) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE)
elif not os.path.isfile(self.vocab_file):
with open(SCREAMING_SNAKE_CASE , """wb""") as fi:
_lowerCamelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE)
return (out_vocab_file,)
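# Minimal usage sketch, assuming a pretrained vocab from the map above is reachable:
#
#   tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
#   enc = tokenizer("Paris est la capitale de la France.")
#   # input_ids start with <s> (id 0) and end with </s> (id 2), per the special-token logic above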
| 88
|
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    """Adaptive softmax head with optional projections (Transformer-XL style)."""
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)

            self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)

                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order
def _UpperCAmelCase ( self: str , __lowerCAmelCase: int , __lowerCAmelCase: Optional[Any] , __lowerCAmelCase: Optional[Any] , __lowerCAmelCase: Dict ) -> Dict:
'''simple docstring'''
if proj is None:
__UpperCAmelCase = nn.functional.linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
__UpperCAmelCase = nn.functional.linear(__lowerCAmelCase , proj.t().contiguous() )
__UpperCAmelCase = nn.functional.linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def _UpperCAmelCase ( self: Optional[int] , __lowerCAmelCase: Optional[Any] , __lowerCAmelCase: Optional[Any]=None , __lowerCAmelCase: Union[str, Any]=False ) -> Tuple:
'''simple docstring'''
if labels is not None:
# Shift so that tokens < n predict n
__UpperCAmelCase = hidden[..., :-1, :].contiguous()
__UpperCAmelCase = labels[..., 1:].contiguous()
__UpperCAmelCase = hidden.view(-1 , hidden.size(-1 ) )
__UpperCAmelCase = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError("Input and labels should have the same size in the batch dimension." )
else:
__UpperCAmelCase = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
__UpperCAmelCase = self._compute_logit(__lowerCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
__UpperCAmelCase = labels != -100
__UpperCAmelCase = torch.zeros_like(__lowerCAmelCase , dtype=hidden.dtype , device=hidden.device )
__UpperCAmelCase = (
-nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
__UpperCAmelCase = nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )
else:
# construct weights and biases
__UpperCAmelCase , __UpperCAmelCase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
__UpperCAmelCase , __UpperCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__UpperCAmelCase = self.out_layers[0].weight[l_idx:r_idx]
__UpperCAmelCase = self.out_layers[0].bias[l_idx:r_idx]
else:
__UpperCAmelCase = self.out_layers[i].weight
__UpperCAmelCase = self.out_layers[i].bias
if i == 0:
__UpperCAmelCase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
__UpperCAmelCase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(__lowerCAmelCase )
biases.append(__lowerCAmelCase )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = weights[0], biases[0], self.out_projs[0]
__UpperCAmelCase = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__UpperCAmelCase = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
if labels is None:
__UpperCAmelCase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
__UpperCAmelCase = torch.zeros_like(__lowerCAmelCase , dtype=hidden.dtype , device=hidden.device )
__UpperCAmelCase = 0
__UpperCAmelCase = [0] + self.cutoffs
for i in range(len(__lowerCAmelCase ) - 1 ):
__UpperCAmelCase , __UpperCAmelCase = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
__UpperCAmelCase = (labels >= l_idx) & (labels < r_idx)
__UpperCAmelCase = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
__UpperCAmelCase = labels.index_select(0 , __lowerCAmelCase ) - l_idx
__UpperCAmelCase = head_logprob.index_select(0 , __lowerCAmelCase )
__UpperCAmelCase = hidden.index_select(0 , __lowerCAmelCase )
else:
__UpperCAmelCase = hidden
if i == 0:
if labels is not None:
__UpperCAmelCase = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
__UpperCAmelCase = head_logprob[:, : self.cutoffs[0]]
else:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = weights[i], biases[i], self.out_projs[i]
__UpperCAmelCase = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__UpperCAmelCase = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
__UpperCAmelCase = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
__UpperCAmelCase = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
__UpperCAmelCase = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
__UpperCAmelCase = logprob_i
if labels is not None:
if (hasattr(self , "keep_order" ) and self.keep_order) or keep_order:
out.index_copy_(0 , __lowerCAmelCase , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def _UpperCAmelCase ( self: int , __lowerCAmelCase: Optional[Any] ) -> List[Any]:
'''simple docstring'''
if self.n_clusters == 0:
__UpperCAmelCase = self._compute_logit(__lowerCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )
else:
# construct weights and biases
__UpperCAmelCase , __UpperCAmelCase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
__UpperCAmelCase , __UpperCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__UpperCAmelCase = self.out_layers[0].weight[l_idx:r_idx]
__UpperCAmelCase = self.out_layers[0].bias[l_idx:r_idx]
else:
__UpperCAmelCase = self.out_layers[i].weight
__UpperCAmelCase = self.out_layers[i].bias
if i == 0:
__UpperCAmelCase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
__UpperCAmelCase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(__lowerCAmelCase )
biases.append(__lowerCAmelCase )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = weights[0], biases[0], self.out_projs[0]
__UpperCAmelCase = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__UpperCAmelCase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
__UpperCAmelCase = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
__UpperCAmelCase = [0] + self.cutoffs
for i in range(len(__lowerCAmelCase ) - 1 ):
__UpperCAmelCase , __UpperCAmelCase = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
__UpperCAmelCase = head_logprob[:, : self.cutoffs[0]]
else:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = weights[i], biases[i], self.out_projs[i]
__UpperCAmelCase = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__UpperCAmelCase = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
__UpperCAmelCase = head_logprob[:, -i] + tail_logprob_i
__UpperCAmelCase = logprob_i
return out
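# Usage sketch for the adaptive softmax head above (Transformer-XL style); the cutoff
# values below are illustrative, not tuned:
#
#   crit = ProjectedAdaptiveLogSoftmax(n_token=267735, d_embed=1024, d_proj=1024,
#                                      cutoffs=[20000, 40000, 200000], div_val=4)
#   hidden = torch.randn(8, 32, 1024)                 # (batch, seq, d_proj)
#   labels = torch.randint(0, 267735, (8, 32))
#   nll = crit(hidden, labels)                        # per-token negative log-likelihoods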
| 221
| 0
|
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"
    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
def __A ( self ) -> str:
A_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ = self.get_dummy_inputs()
A_ = pipe(**_SCREAMING_SNAKE_CASE ).images
A_ = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
A_ = np.array(
[0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def __A ( self ) -> Any:
A_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
A_ = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ = self.get_dummy_inputs()
A_ = pipe(**_SCREAMING_SNAKE_CASE ).images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A_ = np.array(
[0.6_898_892, 0.59_240_556, 0.52_499_527, 0.58_866_215, 0.52_258_235, 0.52_572_715, 0.62_414_473, 0.6_174_387, 0.6_214_964] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __A ( self ) -> Union[str, Any]:
A_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
A_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ = self.get_dummy_inputs()
A_ = pipe(**_SCREAMING_SNAKE_CASE ).images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A_ = np.array(
[0.7_659_278, 0.76_437_664, 0.75_579_107, 0.7_691_116, 0.77_666_986, 0.7_727_672, 0.7_758_664, 0.7_812_226, 0.76_942_515] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __A ( self ) -> Optional[Any]:
A_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
A_ = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ = self.get_dummy_inputs()
A_ = pipe(**_SCREAMING_SNAKE_CASE ).images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A_ = np.array(
[0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __A ( self ) -> str:
A_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
A_ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ = self.get_dummy_inputs()
A_ = pipe(**_SCREAMING_SNAKE_CASE ).images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A_ = np.array(
[0.77_424_496, 0.773_601, 0.7_645_288, 0.7_769_598, 0.7_772_739, 0.7_738_688, 0.78_187_233, 0.77_879_584, 0.767_043] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        # flag name taken from the onnxruntime SessionOptions API
        options.enable_mem_pattern = False
        return options
def __A ( self ) -> Union[str, Any]:
A_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
A_ = init_image.resize((128, 128) )
# using the PNDM scheduler by default
A_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ = '''A fantasy landscape, trending on artstation'''
A_ = torch.manual_seed(0 )
A_ = pipe(
prompt=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=10 , generator=_SCREAMING_SNAKE_CASE , output_type='''np''' , )
A_ = output.images
A_ = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
A_ = np.array([0.4_883, 0.4_947, 0.4_980, 0.4_975, 0.4_982, 0.4_980, 0.5_000, 0.5_006, 0.4_972] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def __A ( self ) -> Union[str, Any]:
A_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
A_ = init_image.resize((128, 128) )
A_ = LMSDiscreteScheduler.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , subfolder='''scheduler''' )
A_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , scheduler=_SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ = '''A fantasy landscape, trending on artstation'''
A_ = torch.manual_seed(0 )
A_ = pipe(
prompt=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=20 , generator=_SCREAMING_SNAKE_CASE , output_type='''np''' , )
A_ = output.images
A_ = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
A_ = np.array(
[0.50_173_753, 0.50_223_356, 0.502_039, 0.50_233_036, 0.5_023_725, 0.5_022_601, 0.5_018_758, 0.50_234_085, 0.50_241_566] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 174
|
'''simple docstring'''
def is_automorphic_number(number: int) -> bool:
    """An automorphic number is one whose square ends in the number itself
    (e.g. 5 -> 25, 76 -> 5776)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
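    # Examples: 5 * 5 = 25 and 76 * 76 = 5776 both end in the original number,
    # so 5 and 76 are automorphic; 7 * 7 = 49 is not.
    # print(is_automorphic_number(76))  # True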
| 174
| 1
|
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10
def __snake_case( self ):
_UpperCAmelCase : Dict = [1, 2, 3, 4]
_UpperCAmelCase : Dict = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(A_ , self.block_size , 0 ) , A_ )
def __snake_case( self ):
_UpperCAmelCase : Optional[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
_UpperCAmelCase : Any = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(A_ , self.block_size , 0 ) , A_ )
def __snake_case( self ):
_UpperCAmelCase : Tuple = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
_UpperCAmelCase : Dict = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(A_ , self.block_size , 0 ) , A_ )
def __snake_case( self ):
_UpperCAmelCase : Any = """It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this."""
_UpperCAmelCase,_UpperCAmelCase : Dict = process_story(A_ )
self.assertEqual(A_ , [] )
def __snake_case( self ):
_UpperCAmelCase : Any = """"""
_UpperCAmelCase,_UpperCAmelCase : int = process_story(A_ )
self.assertEqual(A_ , [] )
self.assertEqual(A_ , [] )
def __snake_case( self ):
_UpperCAmelCase : str = (
"""It was the year of Our Lord one thousand seven hundred and """
"""seventy-five\n\nSpiritual revelations were conceded to England """
"""at that favoured period, as at this.\n@highlight\n\nIt was the best of times"""
)
_UpperCAmelCase,_UpperCAmelCase : int = process_story(A_ )
_UpperCAmelCase : Optional[int] = [
"""It was the year of Our Lord one thousand seven hundred and seventy-five.""",
"""Spiritual revelations were conceded to England at that favoured period, as at this.""",
]
self.assertEqual(A_ , A_ )
_UpperCAmelCase : Optional[int] = ["""It was the best of times."""]
self.assertEqual(A_ , A_ )
def __snake_case( self ):
_UpperCAmelCase : List[Any] = torch.tensor([1, 2, 3, 4] )
_UpperCAmelCase : Optional[int] = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(A_ , 0 ).numpy() , expected.numpy() )
def __snake_case( self ):
_UpperCAmelCase : Any = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
_UpperCAmelCase : Optional[int] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(A_ , 23 ).numpy() , expected.numpy() )
def __snake_case( self ):
_UpperCAmelCase : str = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
_UpperCAmelCase : List[str] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(A_ , 1 ).numpy() , expected.numpy() )
def __snake_case( self ):
_UpperCAmelCase : List[Any] = 1_01
_UpperCAmelCase : Optional[int] = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_01, 5, 6], [1, 1_01, 3, 4, 1_01, 6]] )
_UpperCAmelCase : int = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
_UpperCAmelCase : Union[str, Any] = compute_token_type_ids(A_ , A_ )
np.testing.assert_array_equal(A_ , A_ )
| 643
|
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"
    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
def __snake_case( self ):
_UpperCAmelCase : Optional[int] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=A_ )
_UpperCAmelCase : Optional[Any] = self.get_dummy_inputs()
_UpperCAmelCase : List[str] = pipe(**A_ ).images
_UpperCAmelCase : Tuple = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 5_12, 5_12, 3)
_UpperCAmelCase : Any = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def __snake_case( self ):
_UpperCAmelCase : int = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_UpperCAmelCase : Tuple = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=A_ )
pipe.set_progress_bar_config(disable=A_ )
_UpperCAmelCase : Optional[Any] = self.get_dummy_inputs()
_UpperCAmelCase : Optional[Any] = pipe(**A_ ).images
_UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_UpperCAmelCase : Optional[Any] = np.array(
[0.6_8_9_8_8_9_2, 0.5_9_2_4_0_5_5_6, 0.5_2_4_9_9_5_2_7, 0.5_8_8_6_6_2_1_5, 0.5_2_2_5_8_2_3_5, 0.5_2_5_7_2_7_1_5, 0.6_2_4_1_4_4_7_3, 0.6_1_7_4_3_8_7, 0.6_2_1_4_9_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def __snake_case( self ):
_UpperCAmelCase : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_UpperCAmelCase : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
_UpperCAmelCase : str = self.get_dummy_inputs()
_UpperCAmelCase : List[Any] = pipe(**A_ ).images
_UpperCAmelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_UpperCAmelCase : Tuple = np.array(
[0.7_6_5_9_2_7_8, 0.7_6_4_3_7_6_6_4, 0.7_5_5_7_9_1_0_7, 0.7_6_9_1_1_1_6, 0.7_7_6_6_6_9_8_6, 0.7_7_2_7_6_7_2, 0.7_7_5_8_6_6_4, 0.7_8_1_2_2_2_6, 0.7_6_9_4_2_5_1_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def __snake_case( self ):
_UpperCAmelCase : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_UpperCAmelCase : Dict = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
_UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs()
_UpperCAmelCase : Union[str, Any] = pipe(**A_ ).images
_UpperCAmelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_UpperCAmelCase : List[Any] = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def __snake_case( self ):
_UpperCAmelCase : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_UpperCAmelCase : List[Any] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
_UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs()
_UpperCAmelCase : str = pipe(**A_ ).images
_UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_UpperCAmelCase : Tuple = np.array(
[0.7_7_4_2_4_4_9_6, 0.7_7_3_6_0_1, 0.7_6_4_5_2_8_8, 0.7_7_6_9_5_9_8, 0.7_7_7_2_7_3_9, 0.7_7_3_8_6_8_8, 0.7_8_1_8_7_2_3_3, 0.7_7_8_7_9_5_8_4, 0.7_6_7_0_4_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        # flag name taken from the onnxruntime SessionOptions API
        options.enable_mem_pattern = False
        return options
def __snake_case( self ):
_UpperCAmelCase : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
_UpperCAmelCase : Any = init_image.resize((1_28, 1_28) )
# using the PNDM scheduler by default
_UpperCAmelCase : Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A_ )
_UpperCAmelCase : Union[str, Any] = """A fantasy landscape, trending on artstation"""
_UpperCAmelCase : Tuple = torch.manual_seed(0 )
_UpperCAmelCase : Tuple = pipe(
prompt=A_ , image=A_ , guidance_scale=7.5 , num_inference_steps=10 , generator=A_ , output_type="""np""" , )
_UpperCAmelCase : Dict = output.images
_UpperCAmelCase : List[Any] = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 5_12, 3)
_UpperCAmelCase : List[str] = np.array([0.4_8_8_3, 0.4_9_4_7, 0.4_9_8_0, 0.4_9_7_5, 0.4_9_8_2, 0.4_9_8_0, 0.5_0_0_0, 0.5_0_0_6, 0.4_9_7_2] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def __snake_case( self ):
_UpperCAmelCase : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
_UpperCAmelCase : Optional[int] = init_image.resize((1_28, 1_28) )
_UpperCAmelCase : Optional[int] = LMSDiscreteScheduler.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , subfolder="""scheduler""" )
_UpperCAmelCase : Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , scheduler=A_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A_ )
_UpperCAmelCase : Union[str, Any] = """A fantasy landscape, trending on artstation"""
_UpperCAmelCase : str = torch.manual_seed(0 )
_UpperCAmelCase : Optional[int] = pipe(
prompt=A_ , image=A_ , guidance_scale=7.5 , num_inference_steps=20 , generator=A_ , output_type="""np""" , )
_UpperCAmelCase : Dict = output.images
_UpperCAmelCase : Dict = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 5_12, 3)
_UpperCAmelCase : Optional[Any] = np.array(
[0.5_0_1_7_3_7_5_3, 0.5_0_2_2_3_3_5_6, 0.5_0_2_0_3_9, 0.5_0_2_3_3_0_3_6, 0.5_0_2_3_7_2_5, 0.5_0_2_2_6_0_1, 0.5_0_1_8_7_5_8, 0.5_0_2_3_4_0_8_5, 0.5_0_2_4_1_5_6_6] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 643
| 1
|
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact: the API URL can't be fetched directly,
    so follow the redirect header to the real download location first."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors
def reduce_by_error(logs, error_filter=None):
    """Count each error."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Get the model name from a test method path."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None
    return test
def reduce_by_model(logs, error_filter=None):
    """Count each error per model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
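    # Hedged invocation sketch -- the run id below is illustrative; the token needs actions:read:
    #
    #   python get_ci_error_statistics.py --workflow_run_id 123456789 \
    #       --output_dir ./ci_errors --token "$GITHUB_TOKEN"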
| 255
|
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
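# Sketch of a concrete reader built on the ABC above (illustrative only -- the real
# implementations live in modules such as datasets.io.json and datasets.io.csv):
#
# class JsonDatasetReader(AbstractDatasetReader):
#     def read(self):
#         ...  # build a Dataset (or IterableDataset when streaming) from self.path_or_paths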
| 255
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
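# With the lazy structure above in place (assuming this file is the NLLB subpackage
# __init__), consumers import as usual and the submodule is only resolved on first access:
#
#   from transformers.models.nllb import NllbTokenizer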
| 602
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor (as nested lists of Python floats)."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict(self):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
'''simple docstring'''
    feature_extraction_class = TvltFeatureExtractor
    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)
    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, 'spectrogram_length'))
        self.assertTrue(hasattr(feature_extractor, 'feature_size'))
        self.assertTrue(hasattr(feature_extractor, 'num_audio_channels'))
        self.assertTrue(hasattr(feature_extractor, 'hop_length'))
        self.assertTrue(hasattr(feature_extractor, 'chunk_length'))
        self.assertTrue(hasattr(feature_extractor, 'sampling_rate'))
    def test_save_load_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop('mel_filters')
        mel_second = dict_second.pop('mel_filters')
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_save_load_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, 'feat_extract.json')
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop('mel_filters')
        mel_second = dict_second.pop('mel_filters')
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors='np', sampling_rate=4_4100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors='np', sampling_rate=4_4100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors='np', sampling_rate=4_4100, mask_audio=True).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors='np', sampling_rate=4_4100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
    def _load_datasamples(self, num_samples):
        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy', 'clean', split='validation')
        # automatic decoding with librispeech
        speech_samples = ds.sort('id').select(range(num_samples))[:num_samples]['audio']
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors='pt').audio_values
        self.assertEqual(audio_values.shape, (1, 1, 192, 128))
        expected_values = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_values, atol=1E-4))
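# Editor's sketch: standalone use of the extractor outside the test harness.
# The waveform is synthetic; the output layout (batch, channels, time frames,
# mel bins) follows the shape assertions in the tests above.
if __name__ == "__main__":
    raw_audio = np.random.randn(44_100)  # ~1 second of fake audio
    extractor = TvltFeatureExtractor()
    features = extractor(raw_audio, sampling_rate=44_100, return_tensors="np")
    print(features.audio_values.shape)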
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase = logging.get_logger(__name__)
lowercase = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    '''simple docstring'''
    model_type = 'resnet'
    layer_types = ['basic', 'bottleneck']
    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types)}')
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
class ResNetOnnxConfig(OnnxConfig):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse('1.11')
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
    def atol_for_validation(self) -> float:
return 1E-3
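# Editor's sketch: instantiating the config defined above and inspecting the
# derived stage names; 'basic' is one of the two allowed layer types.
if __name__ == "__main__":
    cfg = ResNetConfig(depths=[2, 2, 2, 2], layer_type="basic")
    print(cfg.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
    print(ResNetOnnxConfig(cfg).atol_for_validation)  # 1e-3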
"""simple docstring"""
def sum_of_digits(n: int) -> int:
    '''simple docstring'''
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res
def sum_of_digits_recursion(n: int) -> int:
    '''simple docstring'''
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)
def sum_of_digits_compact(n: int) -> int:
    '''simple docstring'''
    return sum(int(c) for c in str(abs(n)))
def benchmark() -> None:
    '''simple docstring'''
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f'{func.__name__}({value})'
        timing = timeit(f'__main__.{call}', setup='import __main__')
        print(f'{call:56} = {func(value)} -- {timing:.4f} seconds')
    for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    benchmark()
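# Editor's note: a quick worked check of the three equivalent implementations;
# 262144 -> 2 + 6 + 2 + 1 + 4 + 4 = 19.
if __name__ == "__main__":
    assert (
        sum_of_digits(262_144)
        == sum_of_digits_recursion(262_144)
        == sum_of_digits_compact(262_144)
        == 19
    )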
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int):
    """simple docstring"""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=10_00)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
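# Editor's sketch (not part of the original module): the same circuit with a
# Hadamard gate first puts the qubit in superposition, so the counts split
# roughly 50/50 between '0' and '1' instead of being all '0'.
def single_qubit_superposition(qubits: int, classical_bits: int):
    simulator = qiskit.Aer.get_backend("aer_simulator")
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    circuit.h(0)  # equal superposition of |0> and |1>
    circuit.measure([0], [0])
    job = qiskit.execute(circuit, simulator, shots=10_00)
    return job.result().get_counts(circuit)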
if __name__ == "__main__":
print(F'''Total count for various states are: {single_qubit_measure(1, 1)}''')
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : List[str] = logging.get_logger(__name__)
a__ : Tuple = {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    """simple docstring"""
    model_type = 'speech_to_text_2'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__(
        self,
        vocab_size=1_00_00,
        decoder_layers=6,
        decoder_ffn_dim=20_48,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=2_56,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.0_2,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=10_24,
        **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
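# Editor's sketch: instantiating the config above and exercising the
# attribute_map indirection (hidden_size resolves to d_model,
# num_attention_heads to decoder_attention_heads).
if __name__ == "__main__":
    cfg = Speech2Text2Config(d_model=2_56, decoder_attention_heads=4)
    print(cfg.model_type)           # 'speech_to_text_2'
    print(cfg.hidden_size)          # 256
    print(cfg.num_attention_heads)  # 4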
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_A : List[Any] = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 2_5_5,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 3_8_4}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size['shortest_edge']
        if shortest_edge < 3_8_4:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
            raise ValueError('crop_pct must be specified if size < 384.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
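# Editor's sketch: running the processor above end-to-end on a random dummy
# image. With the default size {'shortest_edge': 384} the warping branch of
# `resize` is taken and the output is a (1, 3, 384, 384) batch.
if __name__ == "__main__":
    dummy = np.random.randint(0, 256, size=(256, 256, 3), dtype=np.uint8)
    processor = ConvNextImageProcessor()
    batch = processor(images=dummy, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 384, 384)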
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            'pad': {'id': 0, 'token': pad_token},
            'eos': {'id': 1, 'token': eos_token},
            'unk': {'id': 2, 'token': unk_token},
        }
        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict['id']] = token_dict['token']
        tokenizer = Tokenizer(Unigram())
        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(' {2,}'), ' '),
                normalizers.Lowercase(),
            ])
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ])
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])],
        )
        parameters = {
            'model': 'SentencePieceUnigram',
            'replacement': replacement,
            'add_prefix_space': add_prefix_space,
        }
        super().__init__(tokenizer, parameters)
    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8_0_0_0,
        show_progress: bool = True,
    ):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress)
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)
        self.add_unk_id()
    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8_0_0_0,
        show_progress: bool = True,
    ):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress)
        self._tokenizer.train_from_iterator(iterator, trainer=trainer)
        self.add_unk_id()
    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json['model']['unk_id'] = self.special_tokens['unk']['id']
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
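# Editor's sketch: training the tokenizer above from an in-memory corpus.
# vocab_size is kept tiny so the example runs in seconds.
if __name__ == "__main__":
    corpus = ["the quick brown fox", "jumps over the lazy dog"] * 100
    tokenizer = SentencePieceUnigramTokenizer()
    tokenizer.train_from_iterator(iter(corpus), vocab_size=100, show_progress=False)
    print(tokenizer.encode("the quick fox").tokens)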
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    '''simple docstring'''
    attributes = ['image_processor', 'feature_extractor']
    image_processor_class = 'TvltImageProcessor'
    feature_extractor_class = 'TvltFeatureExtractor'
    def __init__(self, image_processor, feature_extractor):
        '''simple docstring'''
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor
    def __call__(self, images=None, audio=None, images_mixed=None, sampling_rate=None, mask_audio=False, mask_pixel=False, *args, **kwargs):
        '''simple docstring'''
        if images is None and audio is None:
            raise ValueError('You need to specify either an `images` or `audio` input to process.')
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs)
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict
    @property
    def model_input_names(self):
        '''simple docstring'''
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
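# Editor's sketch: wiring the processor above to its two sub-processors and
# running dummy inputs through it. The video/audio shapes here are
# assumptions chosen for illustration, not values from the original module.
if __name__ == "__main__":
    import numpy as np
    from transformers import TvltFeatureExtractor, TvltImageProcessor
    processor = TvltProcessor(TvltImageProcessor(), TvltFeatureExtractor())
    video = list(np.random.randint(0, 256, (8, 224, 224, 3), dtype=np.uint8))
    audio = np.random.randn(44_100)
    outputs = processor(images=video, audio=audio, sampling_rate=44_100, return_tensors="np")
    print(sorted(outputs.keys()))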
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
'''simple docstring'''
def __init__( self, A = "cpu", A = "openai/clip-vit-large-patch14" ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = device
SCREAMING_SNAKE_CASE : Tuple = CLIPTokenizerFast.from_pretrained(A )
SCREAMING_SNAKE_CASE : int = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73]
SCREAMING_SNAKE_CASE : str = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11]
SCREAMING_SNAKE_CASE : Dict = torchvision.transforms.Normalize(self.image_mean, self.image_std )
SCREAMING_SNAKE_CASE : List[str] = torchvision.transforms.Resize(224 )
SCREAMING_SNAKE_CASE : List[Any] = torchvision.transforms.CenterCrop(224 )
    def preprocess_img(self, images):
        '''simple docstring'''
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images
    def __call__(self, text=None, images=None, **kwargs):
        '''simple docstring'''
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class VQGAN_CLIP(nn.Module):
'''simple docstring'''
    def __init__(self, iterations=10, lr=0.01, vqgan=None, vqgan_config=None, vqgan_checkpoint=None, clip=None, clip_preprocessor=None, device=None, log=False, save_vector=True, return_val="image", quantize=True, save_intermediate=False, show_intermediate=False, make_grid=False):
        '''simple docstring'''
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained('openai/clip-vit-base-patch32')
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)
        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def animate(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        '''simple docstring'''
        images = []
        if output_path is None:
            output_path = './animation.gif'
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + '/*'))
        if not len(paths):
            raise ValueError(
                'No images found in save path, aborting (did you pass save_intermediate=True to the generate'
                ' function?)')
        if len(paths) == 1:
            print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)')
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith('.png'):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")
    def _get_latent(self, path=None, img=None):
        '''simple docstring'''
        if not (path or img):
            raise ValueError('Input either path or tensor')
        if img is not None:
            raise NotImplementedError
        img = preprocess(Image.open(path), target_image_size=256).to(self.device)
        img = preprocess_vqgan(img)
        z, *_ = self.vqgan.encode(img)
        return z
    def _add_vector(self, transform_vector):
        '''simple docstring'''
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)
    def _get_clip_similarity(self, prompts, image, weights=None):
        '''simple docstring'''
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors='pt', padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()
    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        '''simple docstring'''
        pos_logits = self._get_clip_similarity(pos_prompts['prompts'], image, weights=(1 / pos_prompts['weights']))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts['prompts'], image, weights=neg_prompts['weights'])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss
    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        '''simple docstring'''
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)
        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print('CLIP loss', clip_loss)
            if self.log:
                wandb.log({'CLIP Loss': clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector
    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        '''simple docstring'''
        wandb.init(reinit=True, project='face-editor')
        wandb.config.update({'Positive Prompts': positive_prompts})
        wandb.config.update({'Negative Prompts': negative_prompts})
        wandb.config.update({'lr': self.lr, 'iterations': self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log({'Original Image': wandb.Image(image)})
    def process_prompts(self, prompts):
        '''simple docstring'''
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split('|')]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(':')
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }
    def generate(self, pos_prompts, neg_prompts=None, image_path=None, show_intermediate=True, save_intermediate=False, show_final=True, save_final=True, save_path=None):
        '''simple docstring'''
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)
        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join('./outputs/', '_'.join(pos_prompts['prompts']))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + '_' + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path
        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print('Original Image')
            show_pil(custom_to_pil(original_img))
        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({'Image': wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
# Imports
import numpy as np
class IndexCalculation:
    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        '''simple docstring'''
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        '''simple docstring'''
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True
def UpperCAmelCase ( self :int , _lowercase :Dict="" , _lowercase :Tuple=None , _lowercase :Union[str, Any]=None , _lowercase :Optional[Any]=None , _lowercase :Optional[int]=None , _lowercase :Any=None ):
'''simple docstring'''
self.set_matricies(red=_lowercase , green=_lowercase , blue=_lowercase , red_edge=_lowercase , nir=_lowercase )
lowercase__ = {
"ARVI2": self.arvaa,
"CCCI": self.ccci,
"CVI": self.cvi,
"GLI": self.gli,
"NDVI": self.ndvi,
"BNDVI": self.bndvi,
"redEdgeNDVI": self.red_edge_ndvi,
"GNDVI": self.gndvi,
"GBNDVI": self.gbndvi,
"GRNDVI": self.grndvi,
"RBNDVI": self.rbndvi,
"PNDVI": self.pndvi,
"ATSAVI": self.atsavi,
"BWDRVI": self.bwdrvi,
"CIgreen": self.ci_green,
"CIrededge": self.ci_rededge,
"CI": self.ci,
"CTVI": self.ctvi,
"GDVI": self.gdvi,
"EVI": self.evi,
"GEMI": self.gemi,
"GOSAVI": self.gosavi,
"GSAVI": self.gsavi,
"Hue": self.hue,
"IVI": self.ivi,
"IPVI": self.ipvi,
"I": self.i,
"RVI": self.rvi,
"MRVI": self.mrvi,
"MSAVI": self.m_savi,
"NormG": self.norm_g,
"NormNIR": self.norm_nir,
"NormR": self.norm_r,
"NGRDI": self.ngrdi,
"RI": self.ri,
"S": self.s,
"IF": self._if,
"DVI": self.dvi,
"TVI": self.tvi,
"NDRE": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("Index not in the list!" )
return False
    def arv12(self):
'''simple docstring'''
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
    def ccci(self):
'''simple docstring'''
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
    def cvi(self):
'''simple docstring'''
return self.nir * (self.red / (self.green**2))
    def gli(self):
'''simple docstring'''
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
    def ndvi(self):
'''simple docstring'''
return (self.nir - self.red) / (self.nir + self.red)
    def bndvi(self):
'''simple docstring'''
return (self.nir - self.blue) / (self.nir + self.blue)
    def red_edge_ndvi(self):
'''simple docstring'''
return (self.redEdge - self.red) / (self.redEdge + self.red)
    def gndvi(self):
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green)
    def gbndvi(self):
'''simple docstring'''
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
    def grndvi(self):
'''simple docstring'''
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
    def rbndvi(self):
'''simple docstring'''
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
    def pndvi(self):
'''simple docstring'''
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
    def atsavi(self, x=0.08, a=1.22, b=0.03):
'''simple docstring'''
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
    def bwdrvi(self):
'''simple docstring'''
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
    def ci_green(self):
'''simple docstring'''
return (self.nir / self.green) - 1
    def ci_rededge(self):
'''simple docstring'''
return (self.nir / self.redEdge) - 1
    def ci(self):
'''simple docstring'''
return (self.red - self.blue) / self.red
    def ctvi(self):
        '''simple docstring'''
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))
    def gdvi(self):
'''simple docstring'''
return self.nir - self.green
    def evi(self):
'''simple docstring'''
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
    def gemi(self):
        '''simple docstring'''
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
    def gosavi(self, y=0.16):
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green + y)
    def gsavi(self, n=0.5):
'''simple docstring'''
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
    def hue(self):
'''simple docstring'''
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
    def ivi(self, a=None, b=None):
'''simple docstring'''
return (self.nir - b) / (a * self.red)
    def ipvi(self):
'''simple docstring'''
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
    def i(self):
'''simple docstring'''
return (self.red + self.green + self.blue) / 30.5
    def rvi(self):
'''simple docstring'''
return self.nir / self.red
    def mrvi(self):
'''simple docstring'''
return (self.rvi() - 1) / (self.rvi() + 1)
    def m_savi(self):
'''simple docstring'''
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
    def norm_g(self):
'''simple docstring'''
return self.green / (self.nir + self.red + self.green)
    def norm_nir(self):
'''simple docstring'''
return self.nir / (self.nir + self.red + self.green)
    def norm_r(self):
'''simple docstring'''
return self.red / (self.nir + self.red + self.green)
    def ngrdi(self):
'''simple docstring'''
return (self.green - self.red) / (self.green + self.red)
    def ri(self):
'''simple docstring'''
return (self.red - self.green) / (self.red + self.green)
    def s(self):
        '''simple docstring'''
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value
    def _if(self):
'''simple docstring'''
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
    def dvi(self):
'''simple docstring'''
return self.nir / self.red
    def tvi(self):
'''simple docstring'''
return (self.ndvi() + 0.5) ** (1 / 2)
    def ndre(self):
'''simple docstring'''
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
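# Editor's sketch: computing a couple of indices on tiny synthetic bands.
if __name__ == "__main__":
    red = np.array([[26.0, 20.0], [40.0, 35.0]])
    nir = np.array([[80.0, 66.0], [90.0, 77.0]])
    calc = IndexCalculation(red=red, nir=nir)
    print(calc.ndvi())  # elementwise (nir - red) / (nir + red)
    print(calc.calculation("NDVI", red=red, nir=nir))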
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
'''simple docstring'''
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model(self, config, pixel_values, labels):
        '''simple docstring'''
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self, config, pixel_values, labels):
        '''simple docstring'''
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])
        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
) )
    def test_multi_gpu_data_parallel_forward(self):
'''simple docstring'''
pass
    def test_config(self):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
'''simple docstring'''
return
    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_backbone(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
@unittest.skip("Swin does not use inputs_embeds" )
    def test_inputs_embeds(self):
'''simple docstring'''
pass
@unittest.skip("Swin does not support feedforward chunking" )
    def test_feed_forward_chunking(self):
'''simple docstring'''
pass
    def test_model_common_attributes(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions" )
    def test_attention_outputs(self):
'''simple docstring'''
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" )
    def test_retain_grad_hidden_states_attentions(self):
'''simple docstring'''
pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        '''simple docstring'''
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states), expected_num_layers)
        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim])
    def test_hidden_states_output(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" )
    def test_model_from_pretrained(self):
'''simple docstring'''
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
    def test_save_load_fast_init_from_base(self):
'''simple docstring'''
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
    def test_save_load_fast_init_to_base(self):
'''simple docstring'''
pass
    def test_model_outputs_equivalence(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t
        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()
            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif isinstance(tuple_object, Dict):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values(), dict_object.values()):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5), msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                            f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                            f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                        ))
            recursive_check(tuple_output, dict_output)
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig
    def setUp(self):
        '''simple docstring'''
        self.model_tester = MaskFormerSwinModelTester(self)
    def test_backbone_outputs(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()
            outputs = backbone(**inputs_dict)
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
'''simple docstring'''
    def __init__(self):
        '''simple docstring'''
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)
    def forward(self, x):
        '''simple docstring'''
        return self.linear2(self.batchnorm(self.linear1(x)))
class PreForwardHook(ModelHook):
    '''simple docstring'''
    def pre_forward(self, module, *args, **kwargs):
        '''simple docstring'''
        return (args[0] + 1,) + args[1:], kwargs
class PostForwardHook(ModelHook):
    '''simple docstring'''
    def post_forward(self, module, output):
        '''simple docstring'''
        return output + 1
class HooksModelTester(unittest.TestCase):
'''simple docstring'''
    def test_add_and_remove_hooks(self):
        '''simple docstring'''
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])
        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_append_and_remove_hooks(self):
        '''simple docstring'''
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)
        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])
        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_pre_forward_hook_is_executed(self):
        '''simple docstring'''
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1E-5))
        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1E-5))
        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1E-5)
    def test_post_forward_hook_is_executed(self):
        '''simple docstring'''
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1E-5))
        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1E-5))
        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1E-5)
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase : int = ModelForTest()
__lowerCamelCase : int = torch.randn(2 , 3 )
__lowerCamelCase : List[Any] = test_model(_lowerCamelCase )
__lowerCamelCase : List[Any] = PostForwardHook()
add_hook_to_module(_lowerCamelCase , _lowerCamelCase )
__lowerCamelCase : List[Any] = test_model(_lowerCamelCase )
self.assertTrue(torch.allclose(_lowerCamelCase , output + 1 ) )
        self.assertTrue(outputa.requires_grad )
        # Setting no_grad on the attached hook makes the wrapped forward run under torch.no_grad()
        test_model._hf_hook.no_grad = True
        __lowerCamelCase : Any = test_model(_lowerCamelCase )
        self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase : str = ModelForTest()
# Everything is on CPU
        self.assertEqual(model.linear1.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.linear2.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
        add_hook_to_module(model.linear1 , AlignDevicesHook(execution_device=0 ) )
        add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
        add_hook_to_module(model.linear2 , AlignDevicesHook(execution_device=1 ) )
        self.assertEqual(model.linear1.weight.device , torch.device(0 ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
        self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
        self.assertEqual(model.linear2.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
__lowerCamelCase : Tuple = torch.randn(2 , 3 )
__lowerCamelCase : Tuple = model(_lowerCamelCase )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(_lowerCamelCase , AlignDevicesHook(io_same_device=_lowerCamelCase ) )
__lowerCamelCase : str = torch.randn(2 , 3 ).to(0 )
__lowerCamelCase : int = model(_lowerCamelCase )
self.assertEqual(output.device , torch.device(0 ) )
def _snake_case ( self : List[str] ):
'''simple docstring'''
__lowerCamelCase : List[str] = ModelForTest()
# Everything is on CPU
        self.assertEqual(model.linear1.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.linear2.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__lowerCamelCase : Dict = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
        add_hook_to_module(model.linear1 , AlignDevicesHook(**_lowerCamelCase ) )
        add_hook_to_module(model.batchnorm , AlignDevicesHook(**_lowerCamelCase ) )
        add_hook_to_module(model.linear2 , AlignDevicesHook(**_lowerCamelCase ) )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.linear2.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__lowerCamelCase : int = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , _lowerCamelCase )
__lowerCamelCase : Tuple = torch.randn(2 , 3 )
__lowerCamelCase : Optional[int] = model(_lowerCamelCase )
self.assertEqual(output.device , _lowerCamelCase )
# Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1 )
        remove_hook_from_module(model.batchnorm )
        remove_hook_from_module(model.linear2 )
        self.assertEqual(model.linear1.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.linear2.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
__lowerCamelCase : str = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
        add_hook_to_module(model.linear1 , AlignDevicesHook(**_lowerCamelCase ) )
        add_hook_to_module(model.batchnorm , AlignDevicesHook(**_lowerCamelCase ) )
        add_hook_to_module(model.linear2 , AlignDevicesHook(**_lowerCamelCase ) )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.linear2.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__lowerCamelCase : List[Any] = torch.randn(2 , 3 )
__lowerCamelCase : Optional[Any] = model(_lowerCamelCase )
self.assertEqual(output.device , _lowerCamelCase )
# Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1 )
        remove_hook_from_module(model.batchnorm )
        remove_hook_from_module(model.linear2 )
        self.assertEqual(model.linear1.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.linear2.weight.device , torch.device("""cpu""" ) )
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = ModelForTest()
# Everything is on CPU
        self.assertEqual(model.linear1.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.linear2.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__lowerCamelCase : List[str] = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(_lowerCamelCase , execution_device=_lowerCamelCase , offload=_lowerCamelCase )
# Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.linear2.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__lowerCamelCase : List[Any] = torch.device(_lowerCamelCase )
self.assertEqual(model.batchnorm.running_mean.device , _lowerCamelCase )
__lowerCamelCase : int = torch.randn(2 , 3 )
__lowerCamelCase : Any = model(_lowerCamelCase )
self.assertEqual(output.device , _lowerCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowerCamelCase )
        self.assertEqual(model.linear1.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.linear2.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(_lowerCamelCase , execution_device=_lowerCamelCase , offload=_lowerCamelCase , offload_buffers=_lowerCamelCase )
# Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.linear2.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__lowerCamelCase : Optional[Any] = torch.randn(2 , 3 )
__lowerCamelCase : List[Any] = model(_lowerCamelCase )
self.assertEqual(output.device , _lowerCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowerCamelCase )
        self.assertEqual(model.linear1.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.linear2.weight.device , torch.device("""cpu""" ) )
def _snake_case ( self : int ):
'''simple docstring'''
__lowerCamelCase : int = ModelForTest()
# Everything is on CPU
        self.assertEqual(model.linear1.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.linear2.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__lowerCamelCase : List[Any] = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
_lowerCamelCase , execution_device=_lowerCamelCase , offload=_lowerCamelCase , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.linear2.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__lowerCamelCase : List[Any] = torch.device(_lowerCamelCase )
self.assertEqual(model.batchnorm.running_mean.device , _lowerCamelCase )
__lowerCamelCase : Any = torch.randn(2 , 3 )
__lowerCamelCase : Tuple = model(_lowerCamelCase )
self.assertEqual(output.device , _lowerCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowerCamelCase )
        self.assertEqual(model.linear1.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.linear2.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
_lowerCamelCase , execution_device=_lowerCamelCase , offload=_lowerCamelCase , weights_map=model.state_dict() , offload_buffers=_lowerCamelCase , )
# Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.linear2.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__lowerCamelCase : Dict = torch.randn(2 , 3 )
__lowerCamelCase : Optional[Any] = model(_lowerCamelCase )
self.assertEqual(output.device , _lowerCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowerCamelCase )
        self.assertEqual(model.linear1.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.linear2.weight.device , torch.device("""cpu""" ) )
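# --- Illustrative sketch, not part of the test suite ---
# A hedged example of the API exercised above: AlignDevicesHook keeps a module's
# weights and I/O on a chosen device. The helper name below is hypothetical; it
# only uses torch and the accelerate.hooks functions already imported in this file.
def _demo_align_devices_hook():
    module = torch.nn.Linear(3 , 4 )
    add_hook_to_module(module , AlignDevicesHook(execution_device="cpu" , io_same_device=True ) )
    output = module(torch.randn(2 , 3 ) )  # device placement is handled by the hook
    remove_hook_from_module(module )  # restores the original forward
    return output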
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = 'pt'
elif is_tf_available():
    FRAMEWORK = 'tf'
else:
    FRAMEWORK = 'jax'
class PerceiverTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False
def _snake_case ( self : Tuple ):
'''simple docstring'''
super().setUp()
__lowerCamelCase : str = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
    def perceiver_tokenizer( self : Any ):
'''simple docstring'''
return PerceiverTokenizer.from_pretrained("""deepmind/language-perceiver""" )
def _snake_case ( self : Optional[int] , **_lowerCamelCase : Dict ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCamelCase )
    def get_clean_sequence( self , tokenizer , with_prefix_space=False , max_length=2_0 , min_length=5 ):
'''simple docstring'''
__lowerCamelCase : str = []
for i in range(len(_lowerCamelCase ) ):
try:
__lowerCamelCase : Tuple = tokenizer.decode([i] , clean_up_tokenization_spaces=_lowerCamelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
__lowerCamelCase : Optional[Any] = list(filter(lambda _lowerCamelCase : re.match(R"""^[ a-zA-Z]+$""" , t[1] ) , _lowerCamelCase ) )
__lowerCamelCase : Any = list(filter(lambda _lowerCamelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_lowerCamelCase ) , _lowerCamelCase ) )
if max_length is not None and len(_lowerCamelCase ) > max_length:
__lowerCamelCase : Union[str, Any] = toks[:max_length]
if min_length is not None and len(_lowerCamelCase ) < min_length and len(_lowerCamelCase ) > 0:
while len(_lowerCamelCase ) < min_length:
__lowerCamelCase : List[str] = toks + toks
# toks_str = [t[1] for t in toks]
__lowerCamelCase : Optional[int] = [t[0] for t in toks]
# Ensure consistency
__lowerCamelCase : Union[str, Any] = tokenizer.decode(_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
if " " not in output_txt and len(_lowerCamelCase ) > 1:
__lowerCamelCase : Optional[Any] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_lowerCamelCase )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_lowerCamelCase )
)
if with_prefix_space:
__lowerCamelCase : List[str] = """ """ + output_txt
__lowerCamelCase : Optional[int] = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
return output_txt, output_ids
def _snake_case ( self : List[Any] ):
'''simple docstring'''
__lowerCamelCase : List[str] = self.perceiver_tokenizer
__lowerCamelCase : Union[str, Any] = """Unicode €."""
__lowerCamelCase : str = tokenizer(_lowerCamelCase )
__lowerCamelCase : Optional[int] = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
self.assertEqual(encoded["""input_ids"""] , _lowerCamelCase )
# decoding
__lowerCamelCase : Optional[int] = tokenizer.decode(_lowerCamelCase )
self.assertEqual(_lowerCamelCase , """[CLS]Unicode €.[SEP]""" )
__lowerCamelCase : Dict = tokenizer("""e è é ê ë""" )
__lowerCamelCase : Optional[Any] = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
self.assertEqual(encoded["""input_ids"""] , _lowerCamelCase )
# decoding
__lowerCamelCase : List[str] = tokenizer.decode(_lowerCamelCase )
self.assertEqual(_lowerCamelCase , """[CLS]e è é ê ë[SEP]""" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """[CLS]e è é ê ë[SEP]""" )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = self.perceiver_tokenizer
__lowerCamelCase : Union[str, Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
__lowerCamelCase : int = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
# fmt: on
__lowerCamelCase : List[Any] = tokenizer(_lowerCamelCase , padding=_lowerCamelCase , return_tensors=_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
if FRAMEWORK != "jax":
__lowerCamelCase : Tuple = list(batch.input_ids.numpy()[0] )
else:
__lowerCamelCase : List[str] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
self.assertEqual((2, 3_8) , batch.input_ids.shape )
self.assertEqual((2, 3_8) , batch.attention_mask.shape )
def _snake_case ( self : Dict ):
'''simple docstring'''
__lowerCamelCase : Dict = self.perceiver_tokenizer
__lowerCamelCase : Dict = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
__lowerCamelCase : Any = tokenizer(_lowerCamelCase , padding=_lowerCamelCase , return_tensors=_lowerCamelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""" , _lowerCamelCase )
self.assertIn("""attention_mask""" , _lowerCamelCase )
self.assertNotIn("""decoder_input_ids""" , _lowerCamelCase )
self.assertNotIn("""decoder_attention_mask""" , _lowerCamelCase )
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
__lowerCamelCase : List[Any] = self.perceiver_tokenizer
__lowerCamelCase : Optional[Any] = [
"""Summary of the text.""",
"""Another summary.""",
]
__lowerCamelCase : Union[str, Any] = tokenizer(
text_target=_lowerCamelCase , max_length=3_2 , padding="""max_length""" , truncation=_lowerCamelCase , return_tensors=_lowerCamelCase )
self.assertEqual(3_2 , targets["""input_ids"""].shape[1] )
def _snake_case ( self : int ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 4_2 )
# Now let's start the test
__lowerCamelCase : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__lowerCamelCase : Tuple = tempfile.mkdtemp()
__lowerCamelCase : Any = """ He is very happy, UNwant\u00E9d,running"""
__lowerCamelCase : Tuple = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
tokenizer.save_pretrained(_lowerCamelCase )
__lowerCamelCase : str = tokenizer.__class__.from_pretrained(_lowerCamelCase )
__lowerCamelCase : Dict = after_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
shutil.rmtree(_lowerCamelCase )
__lowerCamelCase : Optional[Any] = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__lowerCamelCase : Union[str, Any] = tempfile.mkdtemp()
__lowerCamelCase : int = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
__lowerCamelCase : Dict = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
__lowerCamelCase : Optional[Any] = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
tokenizer.save_pretrained(_lowerCamelCase )
__lowerCamelCase : int = tokenizer.__class__.from_pretrained(_lowerCamelCase )
__lowerCamelCase : Any = after_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 4_2 )
__lowerCamelCase : Any = tokenizer.__class__.from_pretrained(_lowerCamelCase , model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length , 4_3 )
shutil.rmtree(_lowerCamelCase )
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_lowerCamelCase )
with open(os.path.join(_lowerCamelCase , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
__lowerCamelCase : str = json.load(_lowerCamelCase )
with open(os.path.join(_lowerCamelCase , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
__lowerCamelCase : Dict = json.load(_lowerCamelCase )
__lowerCamelCase : Optional[Any] = [F"""<extra_id_{i}>""" for i in range(1_2_5 )]
__lowerCamelCase : Optional[int] = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
__lowerCamelCase : List[Any] = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(_lowerCamelCase , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
with open(os.path.join(_lowerCamelCase , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__lowerCamelCase : List[str] = tokenizer_class.from_pretrained(
_lowerCamelCase , )
self.assertIn(
"""an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__lowerCamelCase : Tuple = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=_lowerCamelCase )]
__lowerCamelCase : str = tokenizer_class.from_pretrained(
_lowerCamelCase , additional_special_tokens=_lowerCamelCase , )
self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , )
def _snake_case ( self : List[str] ):
'''simple docstring'''
__lowerCamelCase : List[str] = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_7_8] ) , """�""" )
def _snake_case ( self : Dict ):
'''simple docstring'''
pass
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
pass
def _snake_case ( self : List[Any] ):
'''simple docstring'''
pass
def _snake_case ( self : List[str] ):
'''simple docstring'''
pass
def _snake_case ( self : int ):
'''simple docstring'''
__lowerCamelCase : int = self.get_tokenizers(fast=_lowerCamelCase , do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
__lowerCamelCase : Optional[int] = ["""[CLS]""", """t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """s""", """t""", """[SEP]"""]
__lowerCamelCase : Union[str, Any] = tokenizer.convert_tokens_to_string(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
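# --- Illustrative sketch, not part of the test suite ---
# PerceiverTokenizer is byte-level: text becomes UTF-8 byte ids (offset for the
# special tokens), so round trips need no vocabulary file. The helper name is
# hypothetical and relies only on the class imported at the top of this file.
def _demo_perceiver_round_trip():
    tokenizer = PerceiverTokenizer()
    ids = tokenizer("hello" )["input_ids"]  # byte ids wrapped in [CLS] ... [SEP]
    return tokenizer.decode(ids )  # "[CLS]hello[SEP]"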
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset( IterableDataset ):
    def __init__( self , p_stop=0.01 , max_length=1000 ):
        """simple docstring"""
        self.p_stop = p_stop
        self.max_length = max_length
    def __iter__( self ):
        """simple docstring"""
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
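# Illustrative note: every pass over RandomIterableDataset stops at a random
# point, so two iterations generally have different lengths. The sharding tests
# below therefore reseed `random` before each pass; the small hypothetical helper
# here shows the reseeding pattern.
def _demo_random_iterable_dataset(seed=0 ):
    random.seed(seed )
    return list(RandomIterableDataset(p_stop=0.5 , max_length=10 ) )  # length varies with the seed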
class _lowerCAmelCase ( unittest.TestCase ):
    def check_batch_sampler_shards( self , batch_sampler , expected , split_batches=False , even_batches=True ):
        """simple docstring"""
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler , 2 , i , split_batches=split_batches , even_batches=even_batches )
            for i in range(2 )
        ]
        batch_sampler_lists = [list(batch_sampler_shard ) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard ) for shard in batch_sampler_shards] , [len(e ) for e in expected] )
        self.assertListEqual(batch_sampler_lists , expected )
def _lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
lowercase = BatchSampler(range(24 ) , batch_size=3 , drop_last=a )
lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a , a )
lowercase = BatchSampler(range(24 ) , batch_size=3 , drop_last=a )
# Expected shouldn't change
self.check_batch_sampler_shards(a , a )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
lowercase = BatchSampler(range(21 ) , batch_size=3 , drop_last=a )
lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(a , a )
lowercase = BatchSampler(range(21 ) , batch_size=3 , drop_last=a )
lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a , a )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
lowercase = BatchSampler(range(22 ) , batch_size=3 , drop_last=a )
lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(a , a )
lowercase = BatchSampler(range(22 ) , batch_size=3 , drop_last=a )
lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a , a )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
lowercase = BatchSampler(range(20 ) , batch_size=3 , drop_last=a )
lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(a , a )
lowercase = BatchSampler(range(20 ) , batch_size=3 , drop_last=a )
lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a , a )
# Check the shards when the dataset is very small.
lowercase = BatchSampler(range(2 ) , batch_size=3 , drop_last=a )
lowercase = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(a , a )
lowercase = BatchSampler(range(2 ) , batch_size=3 , drop_last=a )
lowercase = [[], []]
self.check_batch_sampler_shards(a , a )
def _lowerCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
lowercase = BatchSampler(range(24 ) , batch_size=4 , drop_last=a )
lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a , a , split_batches=a )
lowercase = BatchSampler(range(24 ) , batch_size=4 , drop_last=a )
# Expected shouldn't change
self.check_batch_sampler_shards(a , a , split_batches=a )
# Check the shards when the dataset is not a round multiple of batch size.
lowercase = BatchSampler(range(22 ) , batch_size=4 , drop_last=a )
lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(a , a , split_batches=a )
lowercase = BatchSampler(range(22 ) , batch_size=4 , drop_last=a )
lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a , a , split_batches=a )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowercase = BatchSampler(range(21 ) , batch_size=4 , drop_last=a )
lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(a , a , split_batches=a )
lowercase = BatchSampler(range(21 ) , batch_size=4 , drop_last=a )
lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a , a , split_batches=a )
# Check the shards when the dataset is very small.
lowercase = BatchSampler(range(2 ) , batch_size=4 , drop_last=a )
lowercase = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(a , a , split_batches=a )
lowercase = BatchSampler(range(2 ) , batch_size=4 , drop_last=a )
lowercase = [[], []]
self.check_batch_sampler_shards(a , a , split_batches=a )
def _lowerCAmelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowercase = BatchSampler(range(24 ) , batch_size=3 , drop_last=a )
lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a , a , even_batches=a )
lowercase = BatchSampler(range(24 ) , batch_size=3 , drop_last=a )
# Expected shouldn't change
self.check_batch_sampler_shards(a , a , even_batches=a )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
lowercase = BatchSampler(range(21 ) , batch_size=3 , drop_last=a )
lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a , a , even_batches=a )
lowercase = BatchSampler(range(21 ) , batch_size=3 , drop_last=a )
lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a , a , even_batches=a )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
lowercase = BatchSampler(range(22 ) , batch_size=3 , drop_last=a )
lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(a , a , even_batches=a )
lowercase = BatchSampler(range(22 ) , batch_size=3 , drop_last=a )
lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a , a , even_batches=a )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
lowercase = BatchSampler(range(20 ) , batch_size=3 , drop_last=a )
lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a , a , even_batches=a )
lowercase = BatchSampler(range(20 ) , batch_size=3 , drop_last=a )
lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a , a , even_batches=a )
# Check the shards when the dataset is very small.
lowercase = BatchSampler(range(2 ) , batch_size=3 , drop_last=a )
lowercase = [[[0, 1]], []]
self.check_batch_sampler_shards(a , a , even_batches=a )
lowercase = BatchSampler(range(2 ) , batch_size=3 , drop_last=a )
lowercase = [[], []]
self.check_batch_sampler_shards(a , a , even_batches=a )
def _lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
lowercase = BatchSampler(range(24 ) , batch_size=4 , drop_last=a )
lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )
lowercase = BatchSampler(range(24 ) , batch_size=4 , drop_last=a )
# Expected shouldn't change
self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )
# Check the shards when the dataset is not a round multiple of batch size.
lowercase = BatchSampler(range(22 ) , batch_size=4 , drop_last=a )
lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )
lowercase = BatchSampler(range(22 ) , batch_size=4 , drop_last=a )
lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowercase = BatchSampler(range(21 ) , batch_size=4 , drop_last=a )
lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )
lowercase = BatchSampler(range(21 ) , batch_size=4 , drop_last=a )
lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )
# Check the shards when the dataset is very small.
lowercase = BatchSampler(range(2 ) , batch_size=4 , drop_last=a )
lowercase = [[[0, 1]], []]
self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )
lowercase = BatchSampler(range(2 ) , batch_size=4 , drop_last=a )
lowercase = [[], []]
self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )
def _lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler , 2 , i , even_batches=False ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
    def check_iterable_dataset_shards( self , dataset , seed , batch_size , drop_last=False , num_processes=2 , split_batches=False ):
        """simple docstring"""
        random.seed(seed )
        reference = list(dataset )
        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset , batch_size=batch_size , drop_last=drop_last , num_processes=num_processes , process_index=i , split_batches=split_batches , )
            for i in range(num_processes )
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed )
            iterable_dataset_lists.append(list(iterable_dataset_shard ) )
        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        reference_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l ) , len(reference_list ) )
            self.assertTrue(len(l ) % shard_batch_size == 0 )
        observed = []
        for idx in range(0 , len(reference_list ) , shard_batch_size ):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]
        if not drop_last:
            while len(reference ) < len(observed ):
                reference += reference
        self.assertListEqual(observed , reference[: len(observed )] )
def _lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
        seed = 42
        dataset = RandomIterableDataset()
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=True )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=True )
        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2 )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=True )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=True )
def _lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase = BatchSampler(range(16 ) , batch_size=4 , drop_last=a )
lowercase = SkipBatchSampler(a , 2 )
self.assertListEqual(list(a ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def _lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
lowercase = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def _lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
lowercase = DataLoader(list(range(16 ) ) , batch_size=4 )
lowercase = skip_first_batches(a , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def _lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
lowercase = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(a ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(a ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def _lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
Accelerator()
lowercase = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(a ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(a ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
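# --- Illustrative sketch, not part of the test suite ---
# Mid-epoch resume in one self-contained call chain, mirroring the assertions
# above; the helper name is hypothetical and only uses objects imported in this file.
def _demo_skip_first_batches():
    dataloader = DataLoader(list(range(16 ) ) , batch_size=4 )
    resumed = skip_first_batches(dataloader , num_batches=2 )
    return [batch.tolist() for batch in resumed]  # [[8, 9, 10, 11], [12, 13, 14, 15]]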
"""simple docstring"""
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a ):
    min_val = min(a )  # min() finds the minimum value
    max_val = max(a )  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x , int ), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size ):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1
def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a )
    print('''Sorted order is:''' , ''' '''.join(str(x ) for x in a ) )
if __name__ == "__main__":
main()
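# Complexity note: pigeonhole sort runs in O(n + range) time and O(range) extra
# space, where range = max(a) - min(a) + 1, so it only pays off when the spread
# of values is comparable to n. The example above needs 7 holes for values 2..8.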
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def mark(key ):
    """simple docstring"""
    def decorator(func ):
        handle = getattr(func , """handle_key""" , [] )
        handle += [key]
        setattr(func , """handle_key""" , handle )
        return func
    return decorator
def mark_multiple(*keys ):
    """simple docstring"""
    def decorator(func ):
        handle = getattr(func , """handle_key""" , [] )
        handle += keys
        setattr(func , """handle_key""" , handle )
        return func
    return decorator
class KeyHandler( type ):
    def __new__( cls , name , bases , attrs ):
        new_cls = super().__new__(cls , name , bases , attrs )
        if not hasattr(new_cls , """key_handler""" ):
            setattr(new_cls , """key_handler""" , {} )
        setattr(new_cls , """handle_input""" , KeyHandler.handle_input )
        for value in attrs.values():
            handled_keys = getattr(value , """handle_key""" , [] )
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input( cls ):
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char )
        handler = cls.key_handler.get(char )
        if handler:
            cls.current_selection = char
            return handler(cls )
        else:
            return None
def register( cls ):
    """simple docstring"""
    return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
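# --- Illustrative sketch, not part of this module ---
# How the pieces fit together: @register installs the KeyHandler metaclass,
# @mark ties a key code to a method, and handle_input() dispatches one keypress
# read from stdin. The class below is hypothetical.
#
#     @register
#     class Menu:
#         @mark(ord("q"))
#         def quit(cls):
#             return "quit"
#
#     Menu().handle_input()  # returns "quit" when the user presses "q"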
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
UpperCAmelCase_ = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING
FlaxAutoModel = auto_class_update(FlaxAutoModel)
class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING
FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")
class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")
class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING
FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")
class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)
class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)
class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")
class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)
class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")
class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)
class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)
class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")
class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
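# --- Illustrative sketch, not part of this module ---
# The auto classes above resolve a checkpoint's config type to the matching Flax
# class through the mappings; a minimal, hedged usage (any public BERT checkpoint
# would do):
#
#     model = FlaxAutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
#     # -> instantiates FlaxBertForSequenceClassification via the mapping above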
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None , metadata={'help': 'Model type selected in the list: ' + ', '.join(MODEL_TYPES )} )
    data_dir: str = field(
        default=None , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
    max_seq_length: int = field(
        default=128 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    doc_stride: int = field(
        default=128 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
    max_query_length: int = field(
        default=64 , metadata={
            'help': (
                'The maximum number of tokens for the question. Questions longer than this will '
                'be truncated to this length.'
            )
        } , )
    max_answer_length: int = field(
        default=30 , metadata={
            'help': (
                'The maximum length of an answer that can be generated. This is needed because the start '
                'and end predictions are not conditioned on one another.'
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
    version_2_with_negative: bool = field(
        default=False , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
    null_score_diff_threshold: float = field(
        default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
    n_best_size: int = field(
        default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
    lang_id: int = field(
        default=0 , metadata={
            'help': (
                'language id of input for language-specific xlm models (see'
                ' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
            )
        } , )
    threads: int = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class Split( Enum ):
    train = 'train'
    dev = 'dev'
class SquadDataset( Dataset ):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__( self , args : SquadDataTrainingArguments , tokenizer : PreTrainedTokenizer , limit_length : Optional[int] = None , mode : Union[str, Split] = Split.train , is_language_sensitive : Optional[bool] = False , cache_dir : Optional[str] = None , dataset_format : Optional[str] = "pt" , )-> Optional[int]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ : List[Any] = args
        SCREAMING_SNAKE_CASE__ : List[Any] = is_language_sensitive
        SCREAMING_SNAKE_CASE__ : Any = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode , str ):
try:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name' )
SCREAMING_SNAKE_CASE__ : Optional[int] = mode
# Load data features from cache or dataset file
SCREAMING_SNAKE_CASE__ : Any = 'v2' if args.version_2_with_negative else 'v1'
SCREAMING_SNAKE_CASE__ : int = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''' , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
SCREAMING_SNAKE_CASE__ : List[str] = cached_features_file + '.lock'
with FileLock(a_ ):
if os.path.exists(a_ ) and not args.overwrite_cache:
SCREAMING_SNAKE_CASE__ : Any = time.time()
SCREAMING_SNAKE_CASE__ : int = torch.load(a_ )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
SCREAMING_SNAKE_CASE__ : Tuple = self.old_features['features']
SCREAMING_SNAKE_CASE__ : Tuple = self.old_features.get('dataset' , a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.old_features.get('examples' , a_ )
logger.info(
F'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
F'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
' future run' )
else:
if mode == Split.dev:
SCREAMING_SNAKE_CASE__ : List[Any] = self.processor.get_dev_examples(args.data_dir )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = self.processor.get_train_examples(args.data_dir )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = squad_convert_examples_to_features(
examples=self.examples , tokenizer=a_ , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=a_ , )
SCREAMING_SNAKE_CASE__ : List[Any] = time.time()
torch.save(
{'features': self.features, 'dataset': self.dataset, 'examples': self.examples} , a_ , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self : Union[str, Any] )-> Any:
"""simple docstring"""
return len(self.features )
    def __getitem__( self , i : int )-> Dict[str, torch.Tensor]:
"""simple docstring"""
# Convert to Tensors and build dataset
SCREAMING_SNAKE_CASE__ : int = self.features[i]
SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor(feature.input_ids , dtype=torch.long )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor(feature.attention_mask , dtype=torch.long )
SCREAMING_SNAKE_CASE__ : Any = torch.tensor(feature.token_type_ids , dtype=torch.long )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor(feature.cls_index , dtype=torch.long )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor(feature.p_mask , dtype=torch.float )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor(feature.is_impossible , dtype=torch.float )
SCREAMING_SNAKE_CASE__ : str = {
'input_ids': input_ids,
'attention_mask': attention_mask,
'token_type_ids': token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({'cls_index': cls_index, 'p_mask': p_mask} )
if self.args.version_2_with_negative:
inputs.update({'is_impossible': is_impossible} )
if self.is_language_sensitive:
inputs.update({'langs': (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor(feature.start_position , dtype=torch.long )
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({'start_positions': start_positions, 'end_positions': end_positions} )
return inputs
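
# A minimal usage sketch for the dataset methods above. The surrounding class
# header and imports are outside this fragment, so the `SquadDataset` and
# `SquadDataTrainingArguments` names below are assumptions based on the
# transformers SQuAD utilities:
#
# data_args = SquadDataTrainingArguments(data_dir="./squad")
# tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# train_dataset = SquadDataset(data_args, tokenizer=tokenizer, mode="train")
# item = train_dataset[0]  # dict of tensors: input_ids, attention_mask, start/end positions, ...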
| 636
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''transformer.blocks.{i}.norm1.weight''', f'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm1.bias''', f'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.weight''', f'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.bias''', f'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.weight''', f'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.bias''', f'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.mlp.fc1.weight''', f'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc1.bias''', f'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.weight''', f'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.bias''', f'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)

def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
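
# A minimal sketch of the q/k/v slicing used above: timm fuses the three
# projections into one (3 * hidden, hidden) matrix, and the query, key and
# value blocks are recovered in that order. `_qkv_split_demo` is an
# illustrative helper, not part of the conversion flow; call it manually.
def _qkv_split_demo(hidden=4):
    qkv = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
    q, k, v = qkv[:hidden, :], qkv[hidden : 2 * hidden, :], qkv[-hidden:, :]
    assert torch.equal(torch.cat([q, k, v], dim=0), qkv)
    return q, k, v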
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    # define configuration and initialize HuggingFace model
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)
    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)
    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)
    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids, pixel_values=encoding_1.pixel_values, pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)
    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)
        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
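    # A typical invocation; the script filename is an assumption based on the
    # usual transformers conversion-script naming:
    #   python convert_vilt_original_to_pytorch.py \
    #       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
    #       --pytorch_dump_folder_path ./vilt-converted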
| 636
| 1
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('module.cls_token', 'vit.embeddings.cls_token'),
('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('module.patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('module.pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('module.norm.weight', 'layernorm.weight'),
('module.norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)

def remove_projection_head(state_dict):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
'module.fc.fc1.weight',
'module.fc.fc1.bias',
'module.fc.bn1.weight',
'module.fc.bn1.bias',
'module.fc.bn1.running_mean',
'module.fc.bn1.running_var',
'module.fc.bn1.num_batches_tracked',
'module.fc.fc2.weight',
'module.fc.fc2.bias',
'module.fc.bn2.weight',
'module.fc.bn2.bias',
'module.fc.bn2.running_mean',
'module.fc.bn2.running_var',
'module.fc.bn2.num_batches_tracked',
'module.fc.fc3.weight',
'module.fc.fc3.bias',
]
    for k in ignore_keys:
        state_dict.pop(k, None)

def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000
    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]
    image_processor = ViTImageProcessor(size=config.image_size)
    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)
    model.load_state_dict(state_dict)
    model.eval()
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")
    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state
    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])
    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
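    # A typical invocation (the script filename is an assumption):
    #   python convert_vit_msn_to_pytorch.py \
    #       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
    #       --pytorch_dump_folder_path ./vit-msn-small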
| 89
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_sequence_label_size=2, initializer_range=0.02, num_labels=2, num_choices=4, summary_type="last", use_proj=True, scope=None, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, num_labels=self.num_labels, bos_token_id=self.bos_token_id,
        )
    def create_and_check_xlm_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
    def create_and_check_xlm_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)
        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1
            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
            )

    def _check_hidden_states_for_generate(self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)
        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )
    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 89
| 1
|
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
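    # A typical invocation (the script filename is an assumption):
    #   python convert_unclip_txt2img_to_image_variation.py \
    #       --txt2img_unclip kakaobrain/karlo-v1-alpha --dump_path ./unclip-image-variation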
| 713
|
import copy
import re
class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()
    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break
        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break
        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word
    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")
        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]
        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname
        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return
        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }
        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)
        cls.NAMING_INFO = info
    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO["short_param"][k]
            if isinstance(v, bool):
                v = 1 if v else 0
            sep = "" if isinstance(v, (int, float)) else "-"
            e = f"{key}{sep}{v}"
            name.append(e)
        return "_".join(name)
    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")
        parameters = {}
        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))
            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v
        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]
        return parameters
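
# A minimal usage sketch of the class above; `RunNamer` and its defaults are
# illustrative, not part of this module:
#
# class RunNamer(TrialShortNamer):
#     PREFIX = "run"
#     DEFAULTS = {"learning_rate": 1e-3, "batch_size": 32}
#
# RunNamer.shortname({"learning_rate": 3e-4, "batch_size": 32})
# # -> a name like "run_lr0.0003": only non-default values are encoded, and
# # RunNamer.parse_repr() inverts the encoding back into a parameter dict.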
| 431
| 0
|
'''simple docstring'''
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
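    # Worked check: gamma(z) integrates x**(z - 1) * exp(-x) over [0, inf),
    # and Gamma(n) = (n - 1)! for positive integers, so gamma(5) should be ~24:
    print(gamma(5))  # ~24.0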
| 578
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")
    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )
    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser
def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"
    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")
def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
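
# Typical use goes through the accelerate CLI rather than invoking this module
# directly, e.g. (the config path is illustrative):
#   accelerate test --config_file ~/.cache/huggingface/accelerate/default_config.yaml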
| 578
| 1
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(self, max_length: int, vocab_size: int, d_model: int, dropout_rate: float, num_layers: int, num_heads: int, d_kv: int, d_ff: int, feed_forward_proj: str, is_decoder: bool = False):
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size, d_model)
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate)
        t5config = T5Config(
            vocab_size=vocab_size, d_model=d_model, num_heads=num_heads, d_kv=d_kv, d_ff=d_ff, dropout_rate=dropout_rate, feed_forward_proj=feed_forward_proj, is_decoder=is_decoder, is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)
        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)
        x = self.dropout_pre(x)
        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)
        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)
        return self.dropout_post(x), encoder_inputs_mask
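
# A minimal forward-pass sketch for the encoder above. All sizes are toy
# values, and the class/argument names follow the reconstruction in this file
# rather than a verified public API:
#
# encoder = SpectrogramNotesEncoder(max_length=64, vocab_size=100, d_model=32, dropout_rate=0.1, num_layers=2, num_heads=2, d_kv=16, d_ff=64, feed_forward_proj="gated-gelu")
# tokens = torch.randint(0, 100, (1, 64))
# mask = torch.ones(1, 64, dtype=torch.long)
# hidden, out_mask = encoder(tokens, mask)  # hidden: (1, 64, 32)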
| 705
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(self, sigma_min: float = 0.02, sigma_max: float = 100, s_noise: float = 1.007, s_churn: float = 80, s_min: float = 0.05, s_max: float = 50):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()
    def set_timesteps(self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps, schedule=jnp.array(schedule, dtype=jnp.float32), timesteps=timesteps,
        )

    def add_noise_to_input(self, state: KarrasVeSchedulerState, sample: jnp.ndarray, sigma: float, key: random.KeyArray) -> Tuple[jnp.ndarray, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(self, state: KarrasVeSchedulerState, model_output: jnp.ndarray, sigma_hat: float, sigma_prev: float, sample_hat: jnp.ndarray, return_dict: bool = True) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(self, state: KarrasVeSchedulerState, model_output: jnp.ndarray, sigma_hat: float, sigma_prev: float, sample_hat: jnp.ndarray, sample_prev: jnp.ndarray, derivative: jnp.ndarray, return_dict: bool = True) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps):
        raise NotImplementedError()
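
# The schedule built in set_timesteps is a geometric interpolation between
# sigma_max**2 and sigma_min**2 over the reversed timesteps; a quick numeric
# sanity check of that formula with toy values:
#
# sigma_min, sigma_max, N = 0.02, 100.0, 5
# timesteps = list(range(N))[::-1]
# sched = [sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (N - 1)) for i in timesteps]
# # sched[0] == sigma_min**2 and sched[-1] == sigma_max**2; the pipeline indexes
# # the schedule by timestep t, so values decay as t runs from N - 1 down to 0.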
| 479
| 0
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def SCREAMING_SNAKE_CASE_ ( self :Any ):
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] ):
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 )
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] ):
_a = "cpu" # ensure determinism for the device-dependent torch.Generator
_a = self.get_dummy_components()
_a = StableDiffusionPanoramaPipeline(**UpperCamelCase__ )
_a = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
_a = self.get_dummy_inputs(UpperCamelCase__ )
_a = "french fries"
_a = sd_pipe(**UpperCamelCase__ , negative_prompt=UpperCamelCase__ )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_a = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE_ ( self :List[str] ):
_a = "cpu" # ensure determinism for the device-dependent torch.Generator
_a = self.get_dummy_components()
_a = StableDiffusionPanoramaPipeline(**UpperCamelCase__ )
_a = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
_a = self.get_dummy_inputs(UpperCamelCase__ )
_a = sd_pipe(**UpperCamelCase__ , view_batch_size=2 )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_a = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE_ ( self :List[Any] ):
_a = "cpu" # ensure determinism for the device-dependent torch.Generator
_a = self.get_dummy_components()
_a = EulerAncestralDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" )
_a = StableDiffusionPanoramaPipeline(**UpperCamelCase__ )
_a = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
_a = self.get_dummy_inputs(UpperCamelCase__ )
_a = sd_pipe(**UpperCamelCase__ ).images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_a = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_stable_diffusion_panorama_pndm( self :Tuple ):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , skip_prk_steps=True )
        sd_pipe = StableDiffusionPanoramaPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaSlowTests ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self :Optional[Any] ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs( self :int , seed=0 ):
        generator = torch.manual_seed(seed )
        inputs = {
"prompt": "a photo of the dolomites",
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
    def test_stable_diffusion_panorama_default( self :Union[str, Any] ):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt , subfolder="scheduler" )
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt , scheduler=scheduler , safety_checker=None )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2_048, 3)
        expected_slice = np.array(
[
0.36968392,
0.27025372,
0.32446766,
0.28379387,
0.36363274,
0.30733347,
0.27100027,
0.27054125,
0.25536096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE_ ( self :int ):
_a = StableDiffusionPanoramaPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-base" , safety_checker=UpperCamelCase__ )
_a = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
_a = self.get_inputs()
_a = pipe(**UpperCamelCase__ ).images
_a = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2_048, 3)
_a = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
    def test_stable_diffusion_panorama_intermediate_state( self :str ):
        number_of_steps = 0

        def callback_fn(step: int , timestep: int , latents: torch.FloatTensor ) -> None:
            callback_fn.has_been_called = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
[
0.18681869,
0.33907816,
0.5361276,
0.14432865,
-0.02856611,
-0.73941123,
0.23397987,
0.47322682,
-0.37823164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
[
0.18539645,
0.33987248,
0.5378559,
0.14437142,
-0.02455261,
-0.7338317,
0.23990755,
0.47356272,
-0.3786505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
        callback_fn.has_been_called = False
_a = "stabilityai/stable-diffusion-2-base"
_a = DDIMScheduler.from_pretrained(UpperCamelCase__ , subfolder="scheduler" )
_a = StableDiffusionPanoramaPipeline.from_pretrained(UpperCamelCase__ , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ )
_a = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
_a = self.get_inputs()
pipe(**UpperCamelCase__ , callback=UpperCamelCase__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading( self :str ):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt , subfolder="scheduler" )
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt , scheduler=scheduler , safety_checker=None )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        inputs = self.get_inputs()
        _ = pipe(**inputs )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester :
"""simple docstring"""
    def __init__( self :str , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , relative_attention=False , position_biased_input=True , pos_att_type="None" , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs( self :Any ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=UpperCamelCase__ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self :Dict , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDebertaVaModel(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_masked_lm( self :Tuple , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDebertaVaForMaskedLM(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_for_sequence_classification( self :Any , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFDebertaVaForSequenceClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_for_token_classification( self :List[str] , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFDebertaVaForTokenClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_for_question_answering( self :Optional[Any] , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDebertaVaForQuestionAnswering(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self :List[Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaVaModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFDebertaVaModel,
'fill-mask': TFDebertaVaForMaskedLM,
'question-answering': TFDebertaVaForQuestionAnswering,
'text-classification': TFDebertaVaForSequenceClassification,
'token-classification': TFDebertaVaForTokenClassification,
'zero-shot': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp( self :List[str] ):
        self.model_tester = TFDebertaVaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DebertaVaConfig , hidden_size=37 )

    def test_config( self :str ):
        self.config_tester.run_common_tests()

    def test_model( self :Dict ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_masked_lm( self :List[Any] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_question_answering( self :Any ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification( self :Tuple ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification( self :Union[str, Any] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self :Union[str, Any] ):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" )
        self.assertIsNotNone(model )
@require_tf
class TFDebertaVaModelIntegrationTest ( unittest.TestCase ):
"""simple docstring"""
    @unittest.skip(reason="Model not available yet" )
    def test_mixed_precision( self :List[str] ):
        pass

    @slow
    def test_inference_no_head( self :str ):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" )
        input_ids = tf.constant([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
        tf.debugging.assert_near(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 )
from jiwer import compute_measures
import datasets
A__ : Tuple = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
A__ : List[str] = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
A__ : Any = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
"""simple docstring"""
    def _info( self : Optional[Any] ):
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''', id='''sequence''' ),
                    '''references''': datasets.Value('''string''', id='''sequence''' ),
                } ), codebase_urls=['''https://github.com/jitsi/jiwer/'''], reference_urls=[
                '''https://en.wikipedia.org/wiki/Word_error_rate''',
            ], )

    def _compute( self : Optional[Any], predictions : List[str]=None, references : List[Any]=None, concatenate_texts : Optional[Any]=False ):
        '''simple docstring'''
        if concatenate_texts:
            return compute_measures(references, predictions )["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references ):
                measures = compute_measures(reference, prediction )
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
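# A quick sanity check of the metric logic above, reusing the sentences from
# the usage example in _KWARGS_DESCRIPTION (a sketch; requires `jiwer`):
if __name__ == "__main__":
    predictions = ["this is the prediction", "there is an other sample"]
    references = ["this is the reference", "there is another one"]
    incorrect, total = 0, 0
    for prediction, reference in zip(predictions, references):
        measures = compute_measures(reference, prediction)
        incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
        total += measures["substitutions"] + measures["deletions"] + measures["hits"]
    print(incorrect / total)  # 0.5, matching WER = (S + D + I) / N from the docstring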
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_00_00
RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))


@get_duration
def map_fn(dataset , **kwargs ):
    '''simple docstring'''
    _ = dataset.map(**kwargs )


@get_duration
def filter_fn(dataset , **kwargs ):
    '''simple docstring'''
    _ = dataset.filter(**kwargs )


def benchmark_map_filter():
    '''simple docstring'''
    times = {'''num examples''': SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir , '''dataset.arrow''' ) , features , num_examples=SPEED_TEST_N_EXAMPLES )
        tokenizer = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=True )

        def tokenize(examples ):
            return tokenizer(examples['''text'''] )

        # the timing labels below are descriptive only
        times['''map identity'''] = map_fn(dataset )
        times['''map identity batched'''] = map_fn(dataset , batched=True )
        times['''map no-op batched'''] = map_fn(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='''numpy''' ):
            times['''map no-op batched numpy'''] = map_fn(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='''pandas''' ):
            times['''map no-op batched pandas'''] = map_fn(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='''torch''' , columns='''numbers''' ):
            times['''map no-op batched pytorch'''] = map_fn(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ):
            times['''map no-op batched tensorflow'''] = map_fn(dataset , function=lambda x : None , batched=True )
        times['''map fast-tokenizer batched'''] = map_fn(dataset , function=tokenize , batched=True )
        times['''filter'''] = filter_fn(dataset )
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH , '''wb''' ) as f:
        f.write(json.dumps(times ).encode('''utf-8''' ) )


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
import random
def partition(a: list , left_index: int , right_index: int ) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1 , right_index ):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a: list , left: int , right: int ) -> None:
    if left < right:
        pivot = random.randint(left , right - 1 )
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a , left , right )
        quick_sort_random(
            a , left , pivot_index )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a , pivot_index + 1 , right )  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n" ).strip()
    arr = [int(item ) for item in user_input.split("," )]
    quick_sort_random(arr , 0 , len(arr ) )
    print(arr )


if __name__ == "__main__":
    main()
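# A deterministic spot check of the sort above (a sketch; the seed only pins
# down the random pivot choices, the sorted result holds for any seed):
#
#   import random
#   random.seed(0)
#   data = [5, 3, 8, 1, 9, 2]
#   quick_sort_random(data, 0, len(data))
#   data  # -> [1, 2, 3, 5, 8, 9]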
def knapsack(weights: list , values: list , number_of_items: int , max_weight: int , index: int ) -> int:
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights , values , number_of_items , max_weight , index + 1 )
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights , values , number_of_items , max_weight - weights[index] , index + 1 )
    return max(ans1 , ans2 )
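# A quick worked example of the recursion above (hypothetical inputs, not from
# the original file): with weights [1, 3, 4, 5], values [1, 4, 5, 7] and
# max_weight 7, the best choice is the items weighing 3 and 4 (total value 9):
#
#   knapsack([1, 3, 4, 5], [1, 4, 5, 7], 4, 7, 0)  # -> 9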
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    def __init__( self ) -> List[str]:
        # test for the above condition
        self.test()

    def test( self ) -> Dict:
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance ):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true." )
            stepped , completed , reset = self.update(advance )
            counter += 1
            if counter > 1_0000:
                raise Exception("update() does not fulfill the constraint." )
        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly." )

    @abstractmethod
    def advance( self ) -> Dict:
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def does_advance( self , token_id ) -> Optional[int]:
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def update( self , token_id ) -> Any:
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def reset( self ) -> int:
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def remaining( self ) -> int:
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def copy( self , stateful=False ) -> str:
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class PhrasalConstraint(Constraint):
    def __init__( self , token_ids ) -> Dict:
        super(Constraint , self ).__init__()
        if not isinstance(token_ids , list ) or len(token_ids ) == 0:
            raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
        if any((not isinstance(token_id , int ) or token_id < 0) for token_id in token_ids ):
            raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids )
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance( self ) -> Optional[int]:
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance( self , token_id ) -> str:
        if not isinstance(token_id , int ):
            raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(token_id )}''' )
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update( self , token_id ) -> int:
        if not isinstance(token_id , int ):
            raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(token_id )}''' )
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id ):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset( self ) -> Union[str, Any]:
        self.completed = False
        self.fulfilled_idx = 0

    def remaining( self ) -> Optional[int]:
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy( self , stateful=False ) -> Optional[Any]:
        new_constraint = PhrasalConstraint(self.token_ids )
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint
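# A minimal usage sketch for PhrasalConstraint (hypothetical token ids):
# update() reports (stepped, completed, reset) as generation feeds it tokens.
#
#   constraint = PhrasalConstraint([5, 9, 2])
#   constraint.advance()   # -> 5, the next token needed
#   constraint.update(5)   # -> (True, False, False)
#   constraint.update(9)   # -> (True, False, False)
#   constraint.update(2)   # -> (True, True, False); constraint.completed is True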
class DisjunctiveTrie:
    def __init__( self , nested_token_ids , no_subsets=True ) -> Dict:
        self.max_height = max([len(one ) for one in nested_token_ids] )
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids ):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        if no_subsets and self.has_subsets(root , nested_token_ids ):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                F''' {nested_token_ids}.''' )
        self.trie = root

    def next_tokens( self , current_seq ) -> int:
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys() )
        return next_tokens

    def reached_leaf( self , current_seq ) -> str:
        next_tokens = self.next_tokens(current_seq )
        return len(next_tokens ) == 0

    def count_leaves( self , root ) -> Optional[int]:
        next_nodes = list(root.values() )
        if len(next_nodes ) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn ) for nn in next_nodes] )

    def has_subsets( self , trie , nested_token_ids ) -> Optional[Any]:
        leaf_count = self.count_leaves(trie )
        return len(nested_token_ids ) != leaf_count
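# A small sketch of the trie above (hypothetical ids): the two branches share
# the prefix [1, 2] and fork on the final token.
#
#   trie = DisjunctiveTrie([[1, 2, 3], [1, 2, 4]])
#   trie.next_tokens([1, 2])      # -> [3, 4]
#   trie.reached_leaf([1, 2, 3])  # -> True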
class DisjunctiveConstraint(Constraint):
    def __init__( self , nested_token_ids ) -> List[Any]:
        super(Constraint , self ).__init__()
        if not isinstance(nested_token_ids , list ) or len(nested_token_ids ) == 0:
            raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
        if any(not isinstance(token_ids , list ) for token_ids in nested_token_ids ):
            raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
        if any(
            any((not isinstance(token_id , int ) or token_id < 0) for token_id in token_ids )
            for token_ids in nested_token_ids ):
            raise ValueError(
                F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
        self.trie = DisjunctiveTrie(nested_token_ids )
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance( self ) -> List[Any]:
        token_list = self.trie.next_tokens(self.current_seq )
        if len(token_list ) == 0:
            return None
        else:
            return token_list

    def does_advance( self , token_id ) -> List[str]:
        if not isinstance(token_id , int ):
            raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id )}''' )
        next_tokens = self.trie.next_tokens(self.current_seq )
        return token_id in next_tokens

    def update( self , token_id ) -> int:
        if not isinstance(token_id , int ):
            raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id )}''' )
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id ):
            self.current_seq.append(token_id )
            stepped = True
        else:
            reset = True
            self.reset()
        completed = self.trie.reached_leaf(self.current_seq )
        self.completed = completed
        return stepped, completed, reset

    def reset( self ) -> Dict:
        self.completed = False
        self.current_seq = []

    def remaining( self ) -> int:
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq )

    def copy( self , stateful=False ) -> Union[str, Any]:
        new_constraint = DisjunctiveConstraint(self.token_ids )
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint
class ConstraintListState:
    def __init__( self , constraints ) -> Union[str, Any]:
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints] )
        self.n_constraints = len(constraints )
        self.completed = False
        self.init_state()

    def init_state( self ) -> Union[str, Any]:
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False ) for constraint in self.constraints]

    def get_bank( self ) -> Optional[int]:
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints ) * self.max_seqlen) + add

    def advance( self ) -> List[str]:
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance , int ):
                    token_list.append(advance )
                elif isinstance(advance , list ):
                    token_list.extend(advance )
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance , int ):
                token_list.append(advance )
            elif isinstance(advance , list ):
                token_list.extend(advance )
        if len(token_list ) == 0:
            return None
        else:
            return token_list

    def reset( self , token_ids ) -> int:
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete , stepped = self.add(token )
                # the entire list of constraints are fulfilled
                if self.completed:
                    break
    def add( self , token_id ) -> Dict:
        if not isinstance(token_id , int ):
            raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' )
        complete , stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
            # job, simply update the state
            stepped , complete , reset = self.inprogress_constraint.update(token_id )
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #     e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #     But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #     constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False ) )
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #     inprogress to None. If there are no pending constraints either, then this full list of constraints
                #     is complete.
                self.complete_constraints.append(self.inprogress_constraint )
                self.inprogress_constraint = None
                if len(self.pending_constraints ) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
            # of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints ):
                if pending_constraint.does_advance(token_id ):
                    stepped , complete , reset = pending_constraint.update(token_id )
                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true." )
                    if complete:
                        self.complete_constraints.append(pending_constraint )
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break  # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped
    def copy( self , stateful=True ) -> str:
        new_state = ConstraintListState(self.constraints )  # we actually never touch the self.constraints
        # objects throughout this process, so the copy starts at initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True ) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True )
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
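# A brief sketch of driving ConstraintListState during decoding (hypothetical
# token ids; relies on the add()/advance() contract implemented above):
#
#   state = ConstraintListState([PhrasalConstraint([5, 9])])
#   state.advance()  # -> [5]: tokens that would make progress
#   state.add(5)     # -> (False, True): not complete yet, but stepped
#   state.add(9)     # -> (True, True): all constraints fulfilled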
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Optional[Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase : str = {
'''vocab_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'''
),
'''squeezebert/squeezebert-mnli''': '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt''',
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase : Optional[Any] = {
'''squeezebert/squeezebert-uncased''': 5_1_2,
'''squeezebert/squeezebert-mnli''': 5_1_2,
'''squeezebert/squeezebert-mnli-headless''': 5_1_2,
}
lowerCAmelCase : Tuple = {
'''squeezebert/squeezebert-uncased''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli-headless''': {'''do_lower_case''': True},
}
class SqueezeBertTokenizerFast ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> List[Any]:
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ) -> str:
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]

    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
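# A short illustration of the layout produced above (hypothetical token ids):
# for a sequence pair (A, B) the special tokens are arranged as
#   [CLS] A [SEP] B [SEP]
# and create_token_type_ids_from_sequences marks the first segment (including
# its trailing [SEP]) with 0 and the second segment with 1:
#   [CLS] A1 A2 [SEP] B1 B2 [SEP]
#     0   0  0   0    1  1   1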
'''simple docstring'''
import numpy as np
import datasets
A_ = "\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n"
A_ = "\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n"
A_ = "\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
    def _info( self : Optional[Any] ):
        return datasets.MetricInfo(
            description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float" ,id="sequence" ) ,id="X" ),
                } ) ,)

    def _compute( self : int ,X : Dict ,reference_distribution : Tuple ):
        # convert to numpy arrays
        X = np.array(X )
        reference_distribution = np.array(reference_distribution )

        # Assert that arrays are 2D
        if len(X.shape ) != 2:
            raise ValueError("Expected `X` to be a 2D vector" )
        if len(reference_distribution.shape ) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector" )
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution )
        cov = np.cov(reference_distribution.T )
        try:
            inv_covmat = np.linalg.inv(cov )
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov )
        left_term = np.dot(X_minus_mu ,inv_covmat )
        mahal_dist = np.dot(left_term ,X_minus_mu.T ).diagonal()
        return {"mahalanobis": mahal_dist}
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase ( PipelineTesterMixin , unittest.TestCase):
"""simple docstring"""
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
])
@property
    def dummy_uncond_unet( self : Optional[Any] ) -> str:
        unet = UNetaDModel.from_pretrained(
            """diffusers/consistency-models-test""" , subfolder="""test_unet""" , )
return unet
@property
    def dummy_cond_unet( self : Tuple ) -> Optional[int]:
        unet = UNetaDModel.from_pretrained(
            """diffusers/consistency-models-test""" , subfolder="""test_unet_class_cond""" , )
return unet
    def get_dummy_components( self : Optional[Any] , class_cond : int=False ) -> List[Any]:
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet
        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        components = {
"""unet""": unet,
"""scheduler""": scheduler,
}
return components
    def get_dummy_inputs( self : List[str] , device : List[str] , seed : str=0 ) -> Optional[Any]:
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"""batch_size""": 1,
"""num_inference_steps""": None,
"""timesteps""": [22, 0],
"""generator""": generator,
"""output_type""": """np""",
}
return inputs
    def test_consistency_model_pipeline_multistep( self : List[str] ) -> Union[str, Any]:
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3_572, 0.6_273, 0.4_031, 0.3_961, 0.4_321, 0.5_730, 0.5_266, 0.4_780, 0.5_004] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    def test_consistency_model_pipeline_multistep_class_cond( self : str ) -> str:
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True )
        pipe = ConsistencyModelPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs["""class_labels"""] = 0
        image = pipe(**inputs ).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3_572, 0.6_273, 0.4_031, 0.3_961, 0.4_321, 0.5_730, 0.5_266, 0.4_780, 0.5_004] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    def test_consistency_model_pipeline_onestep( self : int ) -> str:
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs["""num_inference_steps"""] = 1
        inputs["""timesteps"""] = None
        image = pipe(**inputs ).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5_004, 0.5_004, 0.4_994, 0.5_008, 0.4_976, 0.5_018, 0.4_990, 0.4_982, 0.4_987] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    def test_consistency_model_pipeline_onestep_class_cond( self : Optional[Any] ) -> Optional[Any]:
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True )
        pipe = ConsistencyModelPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs["""num_inference_steps"""] = 1
        inputs["""timesteps"""] = None
        inputs["""class_labels"""] = 0
        image = pipe(**inputs ).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5_004, 0.5_004, 0.4_994, 0.5_008, 0.4_976, 0.5_018, 0.4_990, 0.4_982, 0.4_987] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests ( unittest.TestCase):
"""simple docstring"""
    def tearDown( self : str ) -> str:
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs( self : List[Any] , seed : List[Any]=0 , get_fixed_latents : Tuple=False , device : int="cpu" , dtype : str=torch.floataa , shape : Tuple=(1, 3, 64, 64) ) -> str:
        generator = torch.manual_seed(seed )
        inputs = {
            """num_inference_steps""": None,
            """timesteps""": [22, 0],
            """class_labels""": 0,
            """generator""": generator,
            """output_type""": """np""",
        }
        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed , device=device , dtype=dtype , shape=shape )
            inputs["""latents"""] = latents
        return inputs
    def get_fixed_latents( self : Optional[Any] , seed : Any=0 , device : int="cpu" , dtype : Optional[Any]=torch.floataa , shape : Any=(1, 3, 64, 64) ) -> List[str]:
        if type(device ) == str:
            device = torch.device(device )
        generator = torch.Generator(device=device ).manual_seed(seed )
        latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        return latents
    def test_consistency_model_cd_multistep( self : int ) -> str:
        unet = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        pipe = ConsistencyModelPipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device=torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs()
        image = pipe(**inputs ).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0_888, 0.0_881, 0.0_666, 0.0_479, 0.0_292, 0.0_195, 0.0_201, 0.0_163, 0.0_254] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
    def test_consistency_model_cd_onestep( self : Optional[Any] ) -> str:
        unet = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        pipe = ConsistencyModelPipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device=torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs()
        inputs["""num_inference_steps"""] = 1
        inputs["""timesteps"""] = None
        image = pipe(**inputs ).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0_340, 0.0_152, 0.0_063, 0.0_267, 0.0_221, 0.0_107, 0.0_416, 0.0_186, 0.0_217] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
    @require_torch_a
    def test_consistency_model_cd_multistep_flash_attn( self : Any ) -> Dict:
        unet = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        pipe = ConsistencyModelPipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device=torch_device , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(get_fixed_latents=True , device=torch_device )
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True , enable_math=False , enable_mem_efficient=False ):
            image = pipe(**inputs ).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1_875, 0.1_428, 0.1_289, 0.2_151, 0.2_092, 0.1_477, 0.1_877, 0.1_641, 0.1_353] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    @require_torch_a
    def test_consistency_model_cd_onestep_flash_attn( self : List[str] ) -> Any:
        unet = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        pipe = ConsistencyModelPipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device=torch_device , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(get_fixed_latents=True , device=torch_device )
        inputs["""num_inference_steps"""] = 1
        inputs["""timesteps"""] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True , enable_math=False , enable_mem_efficient=False ):
            image = pipe(**inputs ).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1_663, 0.1_948, 0.2_275, 0.1_680, 0.1_204, 0.1_245, 0.1_858, 0.1_338, 0.2_095] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser ):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser )


def pytest_terminal_summary(terminalreporter ):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
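# Typical invocation (an assumption based on the `--make-reports` option read
# above, not something this file itself defines):
#   pytest tests --make-reports=my_run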
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
UpperCAmelCase = logging.getLogger(__name__)
class A_ ( BaseTransformer ):
    '''simple docstring'''

    mode : str = """token-classification"""
    def __init__( self , hparams ):
        if type(hparams ) == dict:
            hparams = Namespace(**hparams )
        module = import_module('tasks' )
        try:
            token_classification_task_clazz = getattr(module , hparams.task_type )
            self.token_classification_task = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                F'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
                F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
        self.labels = self.token_classification_task.get_labels(hparams.labels )
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams , len(self.labels ) , self.mode )
    def forward( self , **inputs ):
        return self.model(**inputs )

    def training_step( self , batch , batch_idx ):
        inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
        if self.config.model_type != "distilbert":
            inputs['token_type_ids'] = (
                batch[2] if self.config.model_type in ['bert', 'xlnet'] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs )
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}
    def prepare_data( self ):
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode )
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                logger.info('Loading features from cached file %s' , cached_features_file )
                features = torch.load(cached_features_file )
            else:
                logger.info('Creating features from dataset file at %s' , args.data_dir )
                examples = self.token_classification_task.read_examples_from_file(args.data_dir , mode )
                features = self.token_classification_task.convert_examples_to_features(
                    examples , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['xlnet'] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['xlnet'] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=False , pad_on_left=bool(self.config.model_type in ['xlnet'] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
                logger.info('Saving features into cached file %s' , cached_features_file )
                torch.save(features , cached_features_file )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case = False ):
lowercase = self._feature_file(snake_case )
logger.info('Loading features from cached file %s' , snake_case )
lowercase = torch.load(snake_case )
lowercase = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowercase = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
lowercase = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
lowercase = torch.tensor([0 for f in features] , dtype=torch.long )
        # HACK(we will stop using this soon)
lowercase = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(snake_case , snake_case , snake_case , snake_case ) , batch_size=snake_case )
    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
        """Compute validation"""
lowercase = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type != "distilbert":
lowercase = (
batch[2] if self.config.model_type in ['bert', 'xlnet'] else None
) # XLM and RoBERTa don"t use token_type_ids
lowercase = self(**snake_case )
lowercase , lowercase = outputs[:2]
lowercase = logits.detach().cpu().numpy()
lowercase = inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase = torch.stack([x['val_loss'] for x in outputs] ).mean()
lowercase = np.concatenate([x['pred'] for x in outputs] , axis=0 )
lowercase = np.argmax(snake_case , axis=2 )
lowercase = np.concatenate([x['target'] for x in outputs] , axis=0 )
lowercase = dict(enumerate(self.labels ) )
lowercase = [[] for _ in range(out_label_ids.shape[0] )]
lowercase = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
lowercase = {
'val_loss': val_loss_mean,
'accuracy_score': accuracy_score(snake_case , snake_case ),
'precision': precision_score(snake_case , snake_case ),
'recall': recall_score(snake_case , snake_case ),
            'f1': f1_score(snake_case , snake_case ),
}
lowercase = dict(results.items() )
lowercase = results
return ret, preds_list, out_label_list
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
# when stable
lowercase , lowercase , lowercase = self._eval_end(snake_case )
lowercase = ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
# updating to test_epoch_end instead of deprecated test_end
lowercase , lowercase , lowercase = self._eval_end(snake_case )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
lowercase = ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case , snake_case ):
# Add NER specific options
BaseTransformer.add_model_specific_args(snake_case , snake_case )
parser.add_argument(
'--task_type' , default='NER' , type=snake_case , help='Task type to fine tune in training (e.g. NER, POS, etc)' )
parser.add_argument(
'--max_seq_length' , default=128 , type=snake_case , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--labels' , default='' , type=snake_case , help='Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.' , )
parser.add_argument(
            '--gpus' , default=0 , type=snake_case , help='The number of GPUs allocated for this; 0 by default, meaning none' , )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
return parser
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
UpperCAmelCase = NERTransformer.add_model_specific_args(parser, os.getcwd())
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = NERTransformer(args)
UpperCAmelCase = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
UpperCAmelCase = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True))
UpperCAmelCase = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
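A hedged usage sketch, not part of the source: assuming the original script name run_ner.py and the generic flags registered by add_generic_args in lightning_base (model_name_or_path, output_dir and do_train are assumptions here; data_dir, labels, max_seq_length, task_type, gpus and do_predict all appear above), a typical fine-tuning run looks like:
# Illustrative invocation only; paths and hyperparameters are placeholders.
# python run_ner.py \
#     --data_dir ./conll2003 \
#     --labels ./labels.txt \
#     --model_name_or_path bert-base-cased \
#     --output_dir ./ner-out \
#     --max_seq_length 128 \
#     --task_type NER \
#     --gpus 1 \
#     --do_train --do_predict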
| 565
| 0
|
"""simple docstring"""
from math import sqrt
def __a ( a ):
"""simple docstring"""
    assert isinstance(a, a ) and (
        number >= 0
    ), "'number' must be an int and positive"
_a = True
# 0 and 1 are none primes.
if number <= 1:
_a = False
for divisor in range(2, int(round(sqrt(a ) ) ) + 1 ):
        # if 'number' is divisible by 'divisor' then set 'status'
        # to False and break out of the loop.
if number % divisor == 0:
_a = False
break
# precondition
    assert isinstance(a, a ), "'status' must be of type bool"
return status
def __a ( a ):
"""simple docstring"""
    assert isinstance(a, a ) and (n > 2), "'N' must be an int and > 2"
# beginList: contains all natural numbers from 2 up to N
_a = list(range(2, n + 1 ) )
    _a = [] # this list will be returned.
    # actual sieve of Eratosthenes
for i in range(len(a ) ):
for j in range(i + 1, len(a ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
_a = 0
# filters actual prime numbers.
_a = [x for x in begin_list if x != 0]
# precondition
    assert isinstance(a, a ), "'ans' must be of type list"
return ans
def __a ( a ):
"""simple docstring"""
    assert isinstance(a, a ) and (n > 2), "'N' must be an int and > 2"
_a = []
    # iterates over all numbers from 2 up to N+1
    # if a number is prime then appends it to the list 'ans'
for number in range(2, n + 1 ):
if is_prime(a ):
ans.append(a )
# precondition
    assert isinstance(a, a ), "'ans' must be of type list"
return ans
def __a ( a ):
"""simple docstring"""
    assert isinstance(a, a ) and number >= 0, "'number' must be an int and >= 0"
    _a = [] # this list will be returned by the function.
# potential prime number factors.
_a = 2
_a = number
if number == 0 or number == 1:
ans.append(a )
    # if 'number' is not prime, build the prime factorization of 'number'
    elif not is_prime(a ):
        while quotient != 1:
            if is_prime(factor ) and (quotient % factor == 0):
                ans.append(factor )
quotient /= factor
else:
factor += 1
else:
ans.append(a )
# precondition
    assert isinstance(a, a ), "'ans' must be of type list"
return ans
def __a ( a ):
"""simple docstring"""
    assert isinstance(a, a ) and (
        number >= 0
    ), "'number' must be an int and >= 0"
_a = 0
# prime factorization of 'number'
_a = prime_factorization(a )
_a = max(a )
# precondition
    assert isinstance(a, a ), "'ans' must be of type int"
return ans
def __a ( a ):
"""simple docstring"""
    assert isinstance(a, a ) and (
        number >= 0
    ), "'number' must be an int and >= 0"
_a = 0
# prime factorization of 'number'
_a = prime_factorization(a )
_a = min(a )
# precondition
    assert isinstance(a, a ), "'ans' must be of type int"
return ans
def __a ( a ):
"""simple docstring"""
    assert isinstance(a, a ), "'number' must be an int"
    assert isinstance(number % 2 == 0, a ), "comparison must be of type bool"
return number % 2 == 0
def __a ( a ):
"""simple docstring"""
    assert isinstance(a, a ), "'number' must be an int"
    assert isinstance(number % 2 != 0, a ), "comparison must be of type bool"
return number % 2 != 0
def __a ( a ):
"""simple docstring"""
    assert (
        isinstance(a, a ) and (number > 2) and is_even(a )
    ), "'number' must be an int, even and > 2"
    _a = [] # this list will be returned
    # creates a list of prime numbers from 2 up to 'number'
_a = get_prime_numbers(a )
_a = len(a )
# run variable for while-loops.
_a = 0
_a = None
    # exit variable, used to break out of the loops
_a = True
while i < len_pn and loop:
_a = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
_a = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
    assert (
        isinstance(a, a )
        and (len(a ) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0] )
        and is_prime(ans[1] )
    ), "'ans' must contain two primes whose sum equals 'number'"
return ans
def __a ( a, a ):
"""simple docstring"""
    assert (
        isinstance(a, a )
        and isinstance(a, a )
        and (numbera >= 0)
        and (numbera >= 0)
    ), "'number1' and 'number2' must be positive integers."
_a = 0
while numbera != 0:
_a = numbera % numbera
_a = numbera
_a = rest
# precondition
    assert isinstance(a, a ) and (
        numbera >= 0
    ), "'number' must be of type int and positive"
return numbera
def __a ( a, a ):
"""simple docstring"""
    assert (
        isinstance(a, a )
        and isinstance(a, a )
        and (numbera >= 1)
        and (numbera >= 1)
    ), "'number1' and 'number2' must be positive integers."
    _a = 1 # actual answer that will be returned.
    # for lcm(x, 1) ("kgV" is German for lcm)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
_a = prime_factorization(a )
_a = prime_factorization(a )
elif numbera == 1 or numbera == 1:
_a = []
_a = []
_a = max(a, a )
_a = 0
_a = 0
    _a = [] # captured numbers in both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
_a = prime_fac_a.count(a )
_a = prime_fac_a.count(a )
for _ in range(max(a, a ) ):
ans *= n
else:
_a = prime_fac_a.count(a )
for _ in range(a ):
ans *= n
done.append(a )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
_a = prime_fac_a.count(a )
for _ in range(a ):
ans *= n
done.append(a )
# precondition
    assert isinstance(a, a ) and (
        ans >= 0
    ), "'ans' must be of type int and positive"
return ans
def __a ( a ):
"""simple docstring"""
    assert isinstance(a, a ) and (n >= 0), "'number' must be a positive int"
_a = 0
_a = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(a ):
ans += 1
# precondition
    assert isinstance(a, a ) and is_prime(
        a ), "'ans' must be a prime number of type int"
return ans
def __a ( a, a ):
"""simple docstring"""
    assert (
        is_prime(a ) and is_prime(a ) and (p_number_a < p_number_a)
    ), "The arguments must be prime numbers and 'pNumber1' < 'pNumber2'"
_a = p_number_a + 1 # jump to the next number
    _a = [] # this list will be returned.
# if number is not prime then
# fetch the next prime number.
while not is_prime(a ):
number += 1
while number < p_number_a:
ans.append(a )
number += 1
# fetch the next prime number.
while not is_prime(a ):
number += 1
# precondition
    assert (
        isinstance(a, a )
        and ans[0] != p_number_a
        and ans[len(a ) - 1] != p_number_a
    ), "'ans' must be a list without the arguments"
    # 'ans' contains neither 'pNumber1' nor 'pNumber2'!
return ans
def __a ( a ):
"""simple docstring"""
    assert isinstance(a, a ) and (n >= 1), "'n' must be an int and >= 1"
_a = [] # will be returned.
for divisor in range(1, n + 1 ):
if n % divisor == 0:
ans.append(a )
# precondition
    assert ans[0] == 1 and ans[len(a ) - 1] == n, "Error in function getDivisors(...)"
return ans
def __a ( a ):
"""simple docstring"""
    assert isinstance(a, a ) and (
        number > 1
    ), "'number' must be an int and > 1"
_a = get_divisors(a )
# precondition
    assert (
        isinstance(a, a )
        and (divisors[0] == 1)
        and (divisors[len(a ) - 1] == number)
    ), "Error in help-function getDivisors(...)"
    # sum all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def __a ( a, a ):
"""simple docstring"""
    assert (
        isinstance(a, a )
        and isinstance(a, a )
        and (denominator != 0)
    ), "The arguments must be of type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
_a = gcd(abs(a ), abs(a ) )
# precondition
assert (
isinstance(a, a )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def __a ( a ):
"""simple docstring"""
    assert isinstance(a, a ) and (n >= 0), "'n' must be an int and >= 0"
    _a = 1 # this will be returned.
for factor in range(1, n + 1 ):
ans *= factor
return ans
def __a ( a ):
"""simple docstring"""
    assert isinstance(a, a ) and (n >= 0), "'n' must be an int and >= 0"
    _a = 0
    _a = 1
    _a = 1 # this will be returned
for _ in range(n - 1 ):
_a = ans
ans += fiba
_a = tmp
return ans
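A hedged usage sketch, not part of the source: every def above was renamed to __a by the obfuscation, but the bodies still call the intended public names (is_prime, get_prime_numbers, prime_factorization, get_divisors, gcd, is_even). Assuming those original names, the helpers behave like this:
# Expected behaviour under the original (de-obfuscated) names; values checked by hand.
# is_prime(97)              # -> True
# prime_factorization(360)  # -> [2, 2, 2, 3, 3, 5]
# gcd(24, 36)               # -> 12
# get_divisors(12)          # -> [1, 2, 3, 4, 6, 12]
# is_even(10)               # -> True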
| 388
|
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""",
# See all BART models at https://huggingface.co/models?filter=bart
}
class __snake_case ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowerCAmelCase_ : List[str] = 'bart'
lowerCAmelCase_ : Any = ['past_key_values']
lowerCAmelCase_ : Dict = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self :Dict , UpperCamelCase__ :Any=50_265 , UpperCamelCase__ :Optional[Any]=1_024 , UpperCamelCase__ :Tuple=12 , UpperCamelCase__ :List[Any]=4_096 , UpperCamelCase__ :Any=16 , UpperCamelCase__ :Optional[Any]=12 , UpperCamelCase__ :Dict=4_096 , UpperCamelCase__ :Optional[int]=16 , UpperCamelCase__ :Dict=0.0 , UpperCamelCase__ :Tuple=0.0 , UpperCamelCase__ :Any="gelu" , UpperCamelCase__ :Optional[Any]=1_024 , UpperCamelCase__ :Tuple=0.1 , UpperCamelCase__ :Optional[Any]=0.0 , UpperCamelCase__ :Any=0.0 , UpperCamelCase__ :Optional[int]=0.02 , UpperCamelCase__ :List[Any]=0.0 , UpperCamelCase__ :Optional[Any]=False , UpperCamelCase__ :Union[str, Any]=True , UpperCamelCase__ :Optional[Any]=3 , UpperCamelCase__ :Any=1 , UpperCamelCase__ :Tuple=0 , UpperCamelCase__ :Optional[int]=2 , UpperCamelCase__ :Optional[int]=True , UpperCamelCase__ :List[Any]=2 , UpperCamelCase__ :Optional[Any]=2 , **UpperCamelCase__ :List[Any] , ):
_a = vocab_size
_a = max_position_embeddings
_a = d_model
_a = encoder_ffn_dim
_a = encoder_layers
_a = encoder_attention_heads
_a = decoder_ffn_dim
_a = decoder_layers
_a = decoder_attention_heads
_a = dropout
_a = attention_dropout
_a = activation_dropout
_a = activation_function
_a = init_std
_a = encoder_layerdrop
_a = decoder_layerdrop
_a = classifier_dropout
_a = use_cache
_a = encoder_layers
_a = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , decoder_start_token_id=UpperCamelCase__ , forced_eos_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , UpperCamelCase__ ):
_a = self.bos_token_id
warnings.warn(
f'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
"The config can simply be saved and uploaded again to be fixed." )
class __snake_case ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE_ ( self :Dict ):
if self.task in ["default", "seq2seq-lm"]:
_a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
_a = {0: "batch"}
_a = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
_a = {0: "batch", 1: "decoder_sequence"}
_a = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase__ , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
_a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
_a , _a = self.num_layers
for i in range(UpperCamelCase__ ):
_a = {0: "batch", 2: "past_sequence + sequence"}
_a = {0: "batch", 2: "past_sequence + sequence"}
else:
_a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def SCREAMING_SNAKE_CASE_ ( self :int ):
if self.task in ["default", "seq2seq-lm"]:
_a = super().outputs
else:
_a = super(UpperCamelCase__ , self ).outputs
if self.use_past:
_a , _a = self.num_layers
for i in range(UpperCamelCase__ ):
_a = {0: "batch", 2: "past_sequence + sequence"}
_a = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def SCREAMING_SNAKE_CASE_ ( self :str , UpperCamelCase__ :PreTrainedTokenizer , UpperCamelCase__ :int = -1 , UpperCamelCase__ :int = -1 , UpperCamelCase__ :bool = False , UpperCamelCase__ :Optional[TensorType] = None , ):
_a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Generate decoder inputs
_a = seq_length if not self.use_past else 1
_a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
_a = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
_a = dict(**UpperCamelCase__ , **UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
_a , _a = common_inputs["input_ids"].shape
_a = common_inputs["decoder_input_ids"].shape[1]
_a , _a = self.num_attention_heads
_a = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_a = decoder_seq_length + 3
_a = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_a = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(UpperCamelCase__ , UpperCamelCase__ )] , dim=1 )
_a = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_a , _a = self.num_layers
_a = min(UpperCamelCase__ , UpperCamelCase__ )
_a = max(UpperCamelCase__ , UpperCamelCase__ ) - min_num_layers
_a = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(UpperCamelCase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
) )
# TODO: test this.
_a = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(UpperCamelCase__ , UpperCamelCase__ ):
common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) )
return common_inputs
def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] , UpperCamelCase__ :PreTrainedTokenizer , UpperCamelCase__ :int = -1 , UpperCamelCase__ :int = -1 , UpperCamelCase__ :bool = False , UpperCamelCase__ :Optional[TensorType] = None , ):
_a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
_a , _a = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
_a = seqlen + 2
_a , _a = self.num_layers
_a , _a = self.num_attention_heads
_a = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_a = common_inputs["attention_mask"].dtype
_a = torch.cat(
[common_inputs["attention_mask"], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 )
_a = [
(torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(UpperCamelCase__ )
]
return common_inputs
def SCREAMING_SNAKE_CASE_ ( self :List[Any] , UpperCamelCase__ :PreTrainedTokenizer , UpperCamelCase__ :int = -1 , UpperCamelCase__ :int = -1 , UpperCamelCase__ :bool = False , UpperCamelCase__ :Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_a = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_a = tokenizer.num_special_tokens_to_add(UpperCamelCase__ )
_a = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ )
# Generate dummy inputs according to compute batch and sequence
_a = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
_a = dict(tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ ) )
return common_inputs
def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] , UpperCamelCase__ :PreTrainedTokenizer , UpperCamelCase__ :int = -1 , UpperCamelCase__ :int = -1 , UpperCamelCase__ :bool = False , UpperCamelCase__ :Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
            _a = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
elif self.task == "causal-lm":
_a = self._generate_dummy_inputs_for_causal_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
else:
_a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
return common_inputs
def SCREAMING_SNAKE_CASE_ ( self :int , UpperCamelCase__ :List[str] , UpperCamelCase__ :List[Any] , UpperCamelCase__ :Optional[int] , UpperCamelCase__ :List[Any] ):
if self.task in ["default", "seq2seq-lm"]:
_a = super()._flatten_past_key_values_(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
_a = super(UpperCamelCase__ , self )._flatten_past_key_values_(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
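A hedged export sketch, not part of the source: the second class above corresponds to transformers' BartOnnxConfig; the import path and the transformers.onnx.export helper with its (preprocessor, model, config, opset, output) signature are assumptions based on the public transformers API.
from pathlib import Path
from transformers import AutoModel, AutoTokenizer
from transformers.models.bart import BartOnnxConfig
from transformers.onnx import export

tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large")
model = AutoModel.from_pretrained("facebook/bart-large")
onnx_config = BartOnnxConfig(model.config, task="default")  # drives the input/output axes defined above
export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("bart.onnx"))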
| 388
| 1
|
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"split_dict" , [
SplitDict(),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42 , dataset_name="my_dataset" )} ),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42 )} ),
SplitDict({"train": SplitInfo()} ),
] , )
def A_ ( _lowerCAmelCase : SplitDict ):
"""simple docstring"""
_lowerCamelCase : List[Any] = split_dict._to_yaml_list()
assert len(_lowerCAmelCase ) == len(_lowerCAmelCase )
_lowerCamelCase : Optional[Any] = SplitDict._from_yaml_list(_lowerCAmelCase )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
_lowerCamelCase : Any = None
# the split name of split_dict takes over the name of the split info object
_lowerCamelCase : Optional[int] = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
"split_info" , [SplitInfo(), SplitInfo(dataset_name=_lowerCAmelCase ), SplitInfo(dataset_name="my_dataset" )] )
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = asdict(SplitDict({"train": split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
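A minimal sketch of the round trip the first test exercises, using the same public datasets API imported above:
from datasets.splits import SplitDict, SplitInfo

# The split name keys the YAML entry; the deprecated dataset_name field is dropped from the dump.
sd = SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)})
reloaded = SplitDict._from_yaml_list(sd._to_yaml_list())
assert reloaded["train"].num_examples == 42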
| 11
|
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {'vocab_file': 'spiece.model'}
UpperCAmelCase_ : Any = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
UpperCAmelCase_ : str = {
'AI-Sweden/gpt-sw3-126m': 2048,
'AI-Sweden/gpt-sw3-350m': 2048,
'AI-Sweden/gpt-sw3-1.6b': 2048,
'AI-Sweden/gpt-sw3-6.7b': 2048,
'AI-Sweden/gpt-sw3-20b': 2048,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Dict,__A : List[str],__A : Any=False,__A : Tuple=False,__A : Dict=False,__A : str=None,__A : List[str]=None,__A : Any=None,__A : str=None,__A : Optional[Dict[str, Any]] = None,**__A : str,):
_lowerCamelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCamelCase : int = kwargs.get("name_or_path" )
if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b; if"
                " you are testing the model, this can safely be ignored" )
_lowerCamelCase : Union[str, Any] = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
_lowerCamelCase : Tuple = "<|endoftext|>" if eos_token is None else eos_token
_lowerCamelCase : List[str] = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
_lowerCamelCase : Union[str, Any] = unk_token if pad_token is None else pad_token
_lowerCamelCase : str = eos_token if bos_token is None else bos_token
else:
_lowerCamelCase : List[str] = "<pad>" if pad_token is None else pad_token
_lowerCamelCase : str = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,pad_token=__A,sp_model_kwargs=self.sp_model_kwargs,**__A,)
_lowerCamelCase : Union[str, Any] = do_lower_case
_lowerCamelCase : List[Any] = remove_space
_lowerCamelCase : str = keep_accents
_lowerCamelCase : List[Any] = vocab_file
_lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__A )
# Used for whitespace normalization in input texts
        # fmt: off
_lowerCamelCase : Union[str, Any] = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
_lowerCamelCase : int = re.compile(
f'[{"".join(map(__A,list(range(0,9 ) ) + list(range(1_1,3_2 ) ) + list(range(1_2_7,1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]' )
def __getstate__( self : Dict ):
_lowerCamelCase : int = self.__dict__.copy()
_lowerCamelCase : Optional[Any] = None
return state
def __setstate__( self : Tuple,__A : int ):
_lowerCamelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self,"sp_model_kwargs" ):
_lowerCamelCase : List[str] = {}
_lowerCamelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowerCamelCase_ ( self : int ):
return len(self.sp_model )
def lowerCamelCase_ ( self : Dict,__A : str ):
_lowerCamelCase : Union[str, Any] = self.non_printing_characters_re.sub("",__A )
# Normalize whitespaces
_lowerCamelCase : Optional[Any] = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
_lowerCamelCase : Optional[Any] = unicodedata.normalize("NFC",__A )
return text
def lowerCamelCase_ ( self : Union[str, Any],__A : str,**__A : Optional[int] ):
_lowerCamelCase : str = self.preprocess_text(__A )
return self.sp_model.encode(__A,out_type=__A )
def lowerCamelCase_ ( self : int,__A : str ):
return self.sp_model.PieceToId(__A )
def lowerCamelCase_ ( self : Optional[int],__A : int ):
return self.sp_model.IdToPiece(__A )
@staticmethod
def lowerCamelCase_ ( __A : str ):
return out_string
def lowerCamelCase_ ( self : str,__A : List[str] ):
_lowerCamelCase : str = []
_lowerCamelCase : List[Any] = ""
_lowerCamelCase : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__A ) + token
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Optional[Any] = []
else:
current_sub_tokens.append(__A )
_lowerCamelCase : str = False
out_string += self.sp_model.decode(__A )
return out_string
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Optional[int] = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self : Optional[Any],__A : str,__A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : List[Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file,__A )
elif not os.path.isfile(self.vocab_file ):
with open(__A,"wb" ) as fi:
_lowerCamelCase : str = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
def lowerCamelCase_ ( self : Optional[int],__A : Union[str, List[str]],__A : Union[str, bool] = False ):
if isinstance(__A,__A ):
_lowerCamelCase : List[Any] = self.preprocess_text(__A )
_lowerCamelCase : Optional[Any] = self.sp_model.encode(__A )
else:
_lowerCamelCase : List[str] = [self.preprocess_text(__A ) for t in text]
_lowerCamelCase : int = self.sp_model.encode(__A )
if return_tensors is True or return_tensors == "pt":
_lowerCamelCase : str = torch.tensor(__A )
return token_ids
def lowerCamelCase_ ( self : List[Any],__A : Union[int, List[int]] ):
return self.sp_model.decode(__A )
def lowerCamelCase_ ( self : Optional[int],__A : "Conversation" ):
_lowerCamelCase : Any = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
_lowerCamelCase : Tuple = (
f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(__A ) + f'{self.bos_token}Bot:'
)
return self.encode(text=__A )
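A hedged usage sketch, not part of the source: the class above is transformers' GPTSw3Tokenizer with its method names obfuscated; encode_fast/decode_fast are the upstream names and "spiece.model" is a placeholder path, both assumptions here.
from transformers import GPTSw3Tokenizer

tok = GPTSw3Tokenizer("spiece.model", name_or_path="AI-Sweden/gpt-sw3-126m")
ids = tok.encode_fast("Träd är fina", return_tensors="pt")  # preprocess + SentencePiece encode
text = tok.decode_fast(ids.tolist())                        # SentencePiece decode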
| 11
| 1
|
'''simple docstring'''
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
    SCREAMING_SNAKE_CASE_ = (KDPM2DiscreteScheduler,)
SCREAMING_SNAKE_CASE_ = 10
def UpperCamelCase( self , **SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
lowerCamelCase_ = {
'num_train_timesteps': 1100,
'beta_start': 0.0_001,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**SCREAMING_SNAKE_CASE_ )
return config
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE_ , beta_end=SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config(prediction_type='v_prediction' )
lowerCamelCase_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase_ = sample.to(SCREAMING_SNAKE_CASE_ )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase_ = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = output.prev_sample
lowerCamelCase_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6934E-07 ) < 1E-2
assert abs(result_mean.item() - 6.1112E-10 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.693428650170972E-07 ) < 1E-2
assert abs(result_mean.item() - 0.0_002 ) < 1E-3
def UpperCamelCase( self ) -> int:
'''simple docstring'''
if torch_device == "mps":
return
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase_ = sample.to(SCREAMING_SNAKE_CASE_ )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase_ = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = output.prev_sample
lowerCamelCase_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
def UpperCamelCase( self ) -> str:
'''simple docstring'''
if torch_device == "mps":
return
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(self.num_inference_steps , device=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter.to(SCREAMING_SNAKE_CASE_ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCamelCase_ = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = output.prev_sample
lowerCamelCase_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
if str(SCREAMING_SNAKE_CASE_ ).startswith('cpu' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
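A minimal sketch of the denoising-loop pattern these tests exercise, against the public diffusers scheduler API (KDPM2DiscreteScheduler is the upstream name of the scheduler under test); the zero tensor stands in for a real UNet prediction:
import torch
from diffusers import KDPM2DiscreteScheduler

scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1100, beta_schedule="linear")
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for model(model_input, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample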
| 42
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
__SCREAMING_SNAKE_CASE : Union[str, Any] =False
class A_ ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class A_ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowercase = VersatileDiffusionTextToImagePipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowercase = """A painting of a squirrel eating a burger """
lowercase = torch.manual_seed(0 )
lowercase = pipe(
prompt=snake_case__ , generator=snake_case__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(snake_case__ )
lowercase = VersatileDiffusionTextToImagePipeline.from_pretrained(snake_case__ )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowercase = generator.manual_seed(0 )
lowercase = pipe(
prompt=snake_case__ , generator=snake_case__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
        lowercase = VersatileDiffusionTextToImagePipeline.from_pretrained(
            """shi-labs/versatile-diffusion""" , torch_dtype=torch.float16 )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowercase = """A painting of a squirrel eating a burger """
lowercase = torch.manual_seed(0 )
lowercase = pipe(
prompt=snake_case__ , generator=snake_case__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
lowercase = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowercase = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
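A minimal sketch of the pipeline call the tests above exercise, via the public diffusers API (downloads the full checkpoint):
import torch
from diffusers import VersatileDiffusionTextToImagePipeline

pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
pipe.remove_unused_weights()  # drop the unused text_unet, as in the save/load test
image = pipe(
    "A painting of a squirrel eating a burger",
    generator=torch.manual_seed(0),
    guidance_scale=7.5,
    num_inference_steps=50,
).images[0]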
| 428
| 0
|
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"""--original_config_file""",
default=None,
type=str,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--scheduler_type""",
default="""pndm""",
type=str,
help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""",
)
parser.add_argument(
"""--pipeline_type""",
default=None,
type=str,
help=(
"""The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"""
""". If `None` pipeline will be automatically inferred."""
),
)
parser.add_argument(
"""--image_size""",
default=None,
type=int,
        help=(
            """The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--prediction_type""",
default=None,
type=str,
help=(
"""The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"""
""" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
parser.add_argument(
"""--stable_unclip""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""",
)
parser.add_argument(
"""--stable_unclip_prior""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""",
)
parser.add_argument(
"""--clip_stats_path""",
type=str,
help="""Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""",
required=False,
)
parser.add_argument(
"""--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint."""
)
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--vae_path""",
type=str,
default=None,
required=False,
help="""Set to a path, hub id to an already converted vae to not convert it again.""",
)
__lowercase = parser.parse_args()
__lowercase = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
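A hedged usage sketch, not part of the source: a typical invocation of the conversion script above for a v1-style checkpoint; the script file name and both paths are illustrative placeholders.
# python convert_original_stable_diffusion_to_diffusers.py \
#     --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
#     --original_config_file ./v1-inference.yaml \
#     --scheduler_type ddim \
#     --extract_ema \
#     --dump_path ./stable-diffusion-v1-5 \
#     --half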
| 563
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowercase = logging.get_logger(__name__)
class _lowercase ( __lowerCamelCase ):
_lowercase : Optional[Any] = ['pixel_values']
def __init__( self : List[str] , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[Dict[str, int]] = None , lowerCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , lowerCamelCase__ : bool = True , lowerCamelCase__ : Dict[str, int] = None , lowerCamelCase__ : bool = True , lowerCamelCase__ : Union[int, float] = 1 / 2_5_5 , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[Union[float, List[float]]] = None , lowerCamelCase__ : Optional[Union[float, List[float]]] = None , **lowerCamelCase__ : str , ) -> None:
"""simple docstring"""
super().__init__(**lowerCamelCase__ )
A_ = size if size is not None else {'''shortest_edge''': 2_5_6}
A_ = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
A_ = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
A_ = get_size_dict(lowerCamelCase__ )
A_ = do_resize
A_ = size
A_ = resample
A_ = do_center_crop
A_ = crop_size
A_ = do_rescale
A_ = rescale_factor
A_ = do_normalize
A_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase ( self : List[Any] , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Dict[str, int] , lowerCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : Optional[int] , ) -> np.ndarray:
"""simple docstring"""
A_ = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
A_ = get_resize_output_image_size(lowerCamelCase__ , size=size['''shortest_edge'''] , default_to_square=lowerCamelCase__ )
return resize(lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def UpperCamelCase ( self : Optional[int] , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Dict[str, int] , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
A_ = get_size_dict(lowerCamelCase__ )
return center_crop(lowerCamelCase__ , size=(size['''height'''], size['''width''']) , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def UpperCamelCase ( self : Any , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : float , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : Union[str, Any] ) -> np.ndarray:
"""simple docstring"""
return rescale(lowerCamelCase__ , scale=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def UpperCamelCase ( self : Optional[Any] , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Union[float, List[float]] , lowerCamelCase__ : Union[float, List[float]] , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : Dict , ) -> np.ndarray:
"""simple docstring"""
return normalize(lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def UpperCamelCase ( self : Optional[int] , lowerCamelCase__ : ImageInput , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Dict[str, int] = None , lowerCamelCase__ : PILImageResampling = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : Dict[str, int] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[float] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[Union[float, List[float]]] = None , lowerCamelCase__ : Optional[Union[float, List[float]]] = None , lowerCamelCase__ : Optional[Union[str, TensorType]] = None , lowerCamelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCamelCase__ : Dict , ) -> Dict:
"""simple docstring"""
A_ = do_resize if do_resize is not None else self.do_resize
A_ = size if size is not None else self.size
A_ = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
A_ = resample if resample is not None else self.resample
A_ = do_center_crop if do_center_crop is not None else self.do_center_crop
A_ = crop_size if crop_size is not None else self.crop_size
A_ = get_size_dict(lowerCamelCase__ )
A_ = do_rescale if do_rescale is not None else self.do_rescale
A_ = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ = do_normalize if do_normalize is not None else self.do_normalize
A_ = image_mean if image_mean is not None else self.image_mean
A_ = image_std if image_std is not None else self.image_std
A_ = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
A_ = [to_numpy_array(lowerCamelCase__ ) for image in images]
if do_resize:
A_ = [self.resize(image=lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ ) for image in images]
if do_center_crop:
A_ = [self.center_crop(image=lowerCamelCase__ , size=lowerCamelCase__ ) for image in images]
if do_rescale:
A_ = [self.rescale(image=lowerCamelCase__ , scale=lowerCamelCase__ ) for image in images]
if do_normalize:
A_ = [self.normalize(image=lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ ) for image in images]
A_ = [to_channel_dimension_format(lowerCamelCase__ , lowerCamelCase__ ) for image in images]
A_ = {'''pixel_values''': images}
return BatchFeature(data=lowerCamelCase__ , tensor_type=lowerCamelCase__ )
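A hedged usage sketch, not part of the source: the concrete processor name is not recoverable from the obfuscation, so SomeImageProcessor below is hypothetical; the call path through BaseImageProcessor.__call__ -> preprocess is the standard transformers convention.
# processor = SomeImageProcessor()                        # hypothetical name; defaults: resize shortest edge to 256, center-crop to 224
# batch = processor(images=pil_image, return_tensors="pt")
# batch["pixel_values"].shape                             # -> torch.Size([1, 3, 224, 224])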
| 563
| 1
|
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __A ( UpperCAmelCase_ ):
"""simple docstring"""
A_ = ["""image_processor""", """tokenizer"""]
A_ = """LayoutLMv2ImageProcessor"""
A_ = ("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""")
def __init__( self , _lowerCamelCase=None , _lowerCamelCase=None , **_lowerCamelCase )-> str:
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , UpperCamelCase__ , )
lowercase__ = kwargs.pop('''feature_extractor''' )
lowercase__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def __call__( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = True , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = 0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = True , _lowerCamelCase = None , **_lowerCamelCase , )-> int:
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
lowercase__ = self.image_processor(images=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowercase__ = [text] # add batch dimension (as the image processor always adds a batch dimension)
lowercase__ = features['''words''']
lowercase__ = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , stride=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_overflowing_tokens=UpperCamelCase__ , return_special_tokens_mask=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , return_length=UpperCamelCase__ , verbose=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ , )
# add pixel values
lowercase__ = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
lowercase__ = self.get_overflowing_images(UpperCamelCase__ , encoded_inputs['''overflow_to_sample_mapping'''] )
lowercase__ = images
return encoded_inputs
def snake_case_( self , _lowerCamelCase , _lowerCamelCase )-> Union[str, Any]:
lowercase__ = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f''' {len(UpperCamelCase__ )} and {len(UpperCamelCase__ )}''' )
return images_with_overflow
def snake_case_( self , *_lowerCamelCase , **_lowerCamelCase )-> Dict:
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_( self , *_lowerCamelCase , **_lowerCamelCase )-> str:
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def snake_case_( self )-> List[str]:
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def snake_case_( self )-> str:
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , UpperCamelCase__ , )
return self.image_processor_class
@property
def snake_case_( self )-> Union[str, Any]:
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , UpperCamelCase__ , )
return self.image_processor
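A hedged usage sketch, not part of the source: the class above matches transformers' LayoutXLMProcessor; the public from_pretrained API and checkpoint name are assumptions, and the image path is a placeholder.
from PIL import Image
from transformers import LayoutXLMProcessor

processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
document = Image.open("invoice.png").convert("RGB")   # placeholder document image
encoding = processor(document, return_tensors="pt")   # OCR runs inside the image processor
print(sorted(encoding.keys()))                        # attention_mask, bbox, image, input_ids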
| 161
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase ={
"""configuration_rembert""": ["""REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RemBertConfig""", """RemBertOnnxConfig"""]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =["""RemBertTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =["""RemBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =[
"""REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RemBertForCausalLM""",
"""RemBertForMaskedLM""",
"""RemBertForMultipleChoice""",
"""RemBertForQuestionAnswering""",
"""RemBertForSequenceClassification""",
"""RemBertForTokenClassification""",
"""RemBertLayer""",
"""RemBertModel""",
"""RemBertPreTrainedModel""",
"""load_tf_weights_in_rembert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
"""TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRemBertForCausalLM""",
"""TFRemBertForMaskedLM""",
"""TFRemBertForMultipleChoice""",
"""TFRemBertForQuestionAnswering""",
"""TFRemBertForSequenceClassification""",
"""TFRemBertForTokenClassification""",
"""TFRemBertLayer""",
"""TFRemBertModel""",
"""TFRemBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
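# A minimal self-contained sketch (hypothetical names, not part of the original
# file) of what the _LazyModule pattern above buys: submodules are imported only
# on first attribute access, keeping the top-level import cheap even when
# optional heavy backends (torch, tensorflow, sentencepiece) are installed.
#
#     import importlib, types
#
#     class TinyLazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._import_structure = import_structure
#
#         def __getattr__(self, attr):
#             for module, names in self._import_structure.items():
#                 if attr in names:
#                     return getattr(importlib.import_module(module), attr)
#             raise AttributeError(attr)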
| 337
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_xmod": [
"XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XmodConfig",
"XmodOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
"XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
"XmodForCausalLM",
"XmodForMaskedLM",
"XmodForMultipleChoice",
"XmodForQuestionAnswering",
"XmodForSequenceClassification",
"XmodForTokenClassification",
"XmodModel",
"XmodPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 711
|
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union

import torch

from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DiTPipeline(DiffusionPipeline):
    r"""Pipeline for class-conditional image generation with a DiT (diffusion transformer) backbone."""

    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create an ImageNet label -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        """Map human-readable label name(s) to the class ids expected by the transformer."""
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."""
                )
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
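# --- Usage sketch (illustrative, not part of the original file) ---
# How the pipeline above is typically invoked; "facebook/DiT-XL-2-256" is the
# reference DiT checkpoint, but treat the exact id as an assumption here:
#
#     import torch
#     from diffusers import DiTPipeline
#
#     pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
#     pipe = pipe.to("cuda")
#     class_ids = pipe.get_label_ids(["golden retriever"])
#     image = pipe(class_labels=class_ids, num_inference_steps=25).images[0]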
| 324
| 0
|
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """BuilderConfig for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
AUDIO_EXTENSIONS = [
'.aiff',
'.au',
'.avr',
'.caf',
'.flac',
'.htk',
'.svx',
'.mat4',
'.mat5',
'.mpc2k',
'.ogg',
'.paf',
'.pvf',
'.raw',
'.rf64',
'.sd2',
'.sds',
'.ircam',
'.voc',
'.w64',
'.wav',
'.nist',
'.wavex',
'.wve',
'.xi',
'.mp3',
'.opus',
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
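# --- Usage sketch (illustrative, not part of the original file) ---
# AudioFolder is normally reached through `load_dataset`; the directory layout
# below (one subfolder per class label) is the documented convention:
#
#     from datasets import load_dataset
#
#     # data_dir/
#     #   dog/1.wav  dog/2.wav
#     #   cat/1.wav  cat/2.wav
#     ds = load_dataset("audiofolder", data_dir="data_dir")
#     print(ds["train"][0]["audio"], ds["train"][0]["label"])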
| 185
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt],
            width=768,
            height=512,
            generator=generator,
            guidance_scale=7.5,
            sag_scale=1.0,
            num_inference_steps=20,
            output_type="np",
        )

        image = output.images
        assert image.shape == (1, 512, 768, 3)
| 404
| 0
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # save/load round-trips are already exercised by check_over_configs above
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2574) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
| 705
|
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Return the Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    """For each vector in value_array, find the nearest vector in dataset (by Euclidean distance)."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Return the cosine similarity of two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 564
| 0
|
'''simple docstring'''
import os
from pathlib import Path
def load_cuda_kernels():
    """JIT-compile and load the custom kernels for multi-scale deformable attention."""
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
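# --- Usage note (illustrative, not part of the original file) ---
# The loader above compiles the extension with torch's cpp_extension machinery,
# so a CUDA toolkit and a C++ compiler must be available at runtime. A typical
# call site (hedged; the exact integration in the modeling code may differ):
#
#     MSDA = load_cuda_kernels()
#     # the attention module then dispatches to MSDA's forward/backward kernels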
| 72
|
'''simple docstring'''
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)


class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Checks whether there might be something wrong with given input with regard to the model."""
        return True

    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records


@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"
            )


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
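# --- Usage sketch (illustrative, not part of the original file) ---
# The pipelines above are usually constructed via the `pipeline` factory; the
# model ids below are common choices, treat them as assumptions:
#
#     from transformers import pipeline
#
#     summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
#     print(summarizer("Long article text ...", max_length=60)[0]["summary_text"])
#
#     translator = pipeline("translation_en_to_fr")
#     print(translator("How are you?")[0]["translation_text"])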
| 396
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}


class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
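# --- Usage sketch (illustrative, not part of the original file) ---
# Instantiating a small config for experimentation; the values below are
# arbitrary assumptions, only the keyword names come from the class above:
#
#     config = NllbMoeConfig(
#         d_model=256,
#         encoder_layers=2,
#         decoder_layers=2,
#         num_experts=8,
#         expert_capacity=16,
#     )
#     assert config.hidden_size == 256  # resolved through attribute_map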
| 718
|
from math import factorial
__lowerCAmelCase :dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}
def digit_factorial_sum(number: int) -> int:
    """Return the sum of the factorials of the digits of `number`."""
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    """Return how many chains, for starting numbers below number_limit, contain exactly chain_length elements."""
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            "Parameters chain_length and number_limit must be greater than 0"
        )

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter
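# Worked example (added for clarity): 145 is a fixed point of
# digit_factorial_sum, since 1! + 4! + 5! = 1 + 24 + 120 = 145, so its chain
# has length 1. Project Euler 74 asks how many starting numbers below one
# million produce a chain of exactly sixty non-repeating terms.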
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{solution()}''')
| 278
| 0
|
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module) -> None:
    """Disable gradient computation for all parameters of the given module."""
    for param in module.parameters():
        param.requires_grad = False


def get_device() -> str:
    """Return the best available torch device as a string."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image) -> None:
    """Display an image with both axes hidden."""
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:
    """Return the current time formatted as HH:MM:SS."""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
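# --- Usage sketch (illustrative, not part of the original file; the function
# names above were restored from obfuscated placeholders) ---
#
#     import torchvision
#
#     model = torchvision.models.resnet18(weights=None)
#     freeze_module(model)      # no parameter will receive gradients
#     device = get_device()     # "cuda", "mps" (with a warning) or "cpu"
#     print(get_timestamp())    # e.g. "14:03:59"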
| 35
|
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys):
        unexpected = [
            key
            for key in unexpected_keys
            if not (key.startswith("entity_predictions") or key.startswith("lm_head"))
        ]
        raise ValueError(f"Unexpected keys {', '.join(unexpected)}")

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)

    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index

    return entity_vocab
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 115
| 0
|
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    """Convert a roman numeral string to an integer."""
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        # a smaller value before a larger one means subtraction (e.g. IV = 4)
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """Convert an integer to a roman numeral string."""
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
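# Worked examples (added for clarity):
#   roman_to_int("MCMXCIV") == 1994   # M=1000, CM=900, XC=90, IV=4
#   int_to_roman(3549) == "MMMDXLIX"  # 3000 + 500 + 40 + 9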
| 629
|
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
"""tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 629
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the area between fnc and the x axis on [x_start, x_end] with the trapezoidal rule."""
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
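# Sanity check (added for clarity): since the rule above sums |trapezoid| areas,
# it approximates the unsigned area between the curve and the x axis. For
# f(x) = x^3 + x^2 on [-5, 5], with F(x) = x^4/4 + x^3/3, that exact area is
# |F(-1) - F(-5)| + |F(5) - F(-1)| = 1376/12 + 2376/12 = 938/3 ≈ 312.67,
# which the printed approximations converge to as the step count grows.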
| 83
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __a ( unittest.TestCase ):
_lowerCAmelCase : int = ViTImageProcessor if is_vision_available() else None
@property
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
UpperCamelCase__ : List[str] = (3, 32, 1_28)
UpperCamelCase__ : str = tempfile.mkdtemp()
# fmt: off
UpperCamelCase__ : Optional[int] = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
UpperCamelCase__ : Tuple = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
UpperCamelCase__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" )
UpperCamelCase__ : Any = {
"do_normalize": False,
"do_resize": True,
"image_processor_type": "ViTImageProcessor",
"resample": 3,
"size": {"height": 32, "width": 1_28},
}
UpperCamelCase__ : Tuple = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowercase ( self : int , **SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def __lowercase ( self : List[str] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def __lowercase ( self : Any ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )
UpperCamelCase__ : str = Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE , 0 , -1 ) )
return image_input
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = self.get_tokenizer()
UpperCamelCase__ : Tuple = self.get_image_processor()
UpperCamelCase__ : Union[str, Any] = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ : int = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
UpperCamelCase__ : Any = self.get_tokenizer()
UpperCamelCase__ : Union[str, Any] = self.get_image_processor()
UpperCamelCase__ : int = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ : Union[str, Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
UpperCamelCase__ : List[Any] = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 )
UpperCamelCase__ : Optional[Any] = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE )
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_processor_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)
        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(
            list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"]
        )
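# --- Illustrative sketch (not part of the original test file) -----------------------
# How MgpstrProcessor.batch_decode is typically used at inference time: the MGP-STR
# model returns a tuple of three logit tensors (character, BPE, and wordpiece heads),
# and batch_decode fuses them into final text predictions, matching the key list
# asserted above. The checkpoint name and helper below are assumptions for
# illustration, not part of the test suite.
def _example_mgpstr_inference(image):
    from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor

    processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
    model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")

    pixel_values = processor(images=image, return_tensors="pt").pixel_values
    outputs = model(pixel_values)

    # outputs.logits is (char_logits, bpe_logits, wp_logits), matching the three
    # tensors fed to batch_decode in the test above.
    decoded = processor.batch_decode(outputs.logits)
    return decoded["generated_text"]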
import inspect
import unittest

from huggingface_hub import hf_hub_download

from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ASTForAudioClassification, ASTModel
    from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
    )

if is_torchaudio_available():
    import torchaudio

    from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
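    # Worked example with the defaults above (illustrative comment, not original code):
    # frequency_out_dimension = (16 - 2) // 2 + 1 = 8, time_out_dimension = (24 - 2) // 2 + 1 = 12,
    # so num_patches = 8 * 12 = 96 and seq_length = 96 + 2 = 98.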
    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False
    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )
    audio, sampling_rate = torchaudio.load(filepath)

    return audio, sampling_rate
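# Illustrative note (not original code): ASTFeatureExtractor converts the raw waveform
# into a log-mel filterbank padded or truncated to a fixed number of frames, so the
# model input has shape (batch, max_length, num_mel_bins), i.e. (1, 1024, 128) for the
# AudioSet checkpoint used below.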
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
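# --- Illustrative sketch (not part of the original test file) -----------------------
# The integration test above mirrors what the audio-classification pipeline does end
# to end. A minimal sketch, assuming a local audio file path:
def _example_ast_pipeline(audio_path):
    from transformers import pipeline

    classifier = pipeline("audio-classification", model="MIT/ast-finetuned-audioset-10-10-0.4593")
    # Returns the top AudioSet labels with scores, e.g. [{"label": ..., "score": ...}].
    return classifier(audio_path)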
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
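# --- Illustrative sketch (not part of the original module) --------------------------
# Applying the template to a dataset whose columns do not already use the canonical
# names; the dataset and column names are assumptions, and `prepare_for_task` assumes
# a `datasets` version that still ships the task template API.
def _example_prepare_for_summarization():
    from datasets import load_dataset

    template = Summarization(text_column="article", summary_column="highlights")
    dataset = load_dataset("cnn_dailymail", "3.0.0", split="train")
    # Renames and casts columns according to `column_mapping` defined above.
    return dataset.prepare_for_task(template)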