code stringlengths 82 54.1k | code_codestyle int64 0 699 | style_context stringlengths 111 35.6k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCamelCase__ ( __UpperCamelCase , unittest.TestCase):
'''simple docstring'''
_A = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def _lowerCamelCase ( self :List[str] , a :int=0 ) -> int:
__UpperCamelCase : List[str] = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(a ) )
__UpperCamelCase : Optional[int] = np.random.RandomState(a )
__UpperCamelCase : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def _lowerCamelCase ( self :str ) -> Optional[int]:
__UpperCamelCase : Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=a )
__UpperCamelCase : int = self.get_dummy_inputs()
__UpperCamelCase : Any = pipe(**a ).images
__UpperCamelCase : Optional[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 1_2_8, 1_2_8, 3)
__UpperCamelCase : Optional[Any] = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def _lowerCamelCase ( self :Optional[Any] ) -> List[str]:
__UpperCamelCase : str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
__UpperCamelCase : Any = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=a )
pipe.set_progress_bar_config(disable=a )
__UpperCamelCase : List[str] = self.get_dummy_inputs()
__UpperCamelCase : Optional[int] = pipe(**a ).images
__UpperCamelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
__UpperCamelCase : Optional[Any] = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _lowerCamelCase ( self :Dict ) -> Union[str, Any]:
__UpperCamelCase : Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
__UpperCamelCase : Dict = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a )
# warmup pass to apply optimizations
__UpperCamelCase : List[str] = pipe(**self.get_dummy_inputs() )
__UpperCamelCase : str = self.get_dummy_inputs()
__UpperCamelCase : Union[str, Any] = pipe(**a ).images
__UpperCamelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
__UpperCamelCase : List[str] = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _lowerCamelCase ( self :str ) -> Optional[Any]:
__UpperCamelCase : List[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
__UpperCamelCase : Dict = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a )
__UpperCamelCase : int = self.get_dummy_inputs()
__UpperCamelCase : Optional[Any] = pipe(**a ).images
__UpperCamelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
__UpperCamelCase : List[Any] = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _lowerCamelCase ( self :Tuple ) -> Optional[Any]:
__UpperCamelCase : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
__UpperCamelCase : Any = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a )
__UpperCamelCase : Any = self.get_dummy_inputs()
__UpperCamelCase : Union[str, Any] = pipe(**a ).images
__UpperCamelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
__UpperCamelCase : Optional[Any] = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _lowerCamelCase ( self :str ) -> List[Any]:
__UpperCamelCase : Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
__UpperCamelCase : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a )
__UpperCamelCase : str = self.get_dummy_inputs()
__UpperCamelCase : Dict = pipe(**a ).images
__UpperCamelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
__UpperCamelCase : Optional[int] = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
@property
def _lowerCamelCase ( self :Optional[int] ) -> Optional[Any]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _lowerCamelCase ( self :List[Any] ) -> Any:
__UpperCamelCase : List[Any] = ort.SessionOptions()
__UpperCamelCase : Optional[Any] = False
return options
def _lowerCamelCase ( self :Optional[Any] ) -> str:
__UpperCamelCase : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
__UpperCamelCase : List[Any] = init_image.resize((7_6_8, 5_1_2) )
# using the PNDM scheduler by default
__UpperCamelCase : Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a )
__UpperCamelCase : str = 'A fantasy landscape, trending on artstation'
__UpperCamelCase : Union[str, Any] = np.random.RandomState(0 )
__UpperCamelCase : int = pipe(
prompt=a , image=a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=1_0 , generator=a , output_type="np" , )
__UpperCamelCase : Optional[Any] = output.images
__UpperCamelCase : Any = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 7_6_8, 3)
__UpperCamelCase : List[Any] = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def _lowerCamelCase ( self :Optional[Any] ) -> List[Any]:
__UpperCamelCase : str = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
__UpperCamelCase : str = init_image.resize((7_6_8, 5_1_2) )
__UpperCamelCase : Union[str, Any] = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
__UpperCamelCase : Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=a , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a )
__UpperCamelCase : Union[str, Any] = 'A fantasy landscape, trending on artstation'
__UpperCamelCase : Optional[int] = np.random.RandomState(0 )
__UpperCamelCase : Optional[Any] = pipe(
prompt=a , image=a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=2_0 , generator=a , output_type="np" , )
__UpperCamelCase : List[str] = output.images
__UpperCamelCase : Any = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 7_6_8, 3)
__UpperCamelCase : List[str] = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 | 557 |
import numpy as np
class A_ :
'''simple docstring'''
def __init__( self: Optional[int] ):
__lowerCamelCase : int = (0, 0)
__lowerCamelCase : List[str] = None
__lowerCamelCase : int = 0
__lowerCamelCase : int = 0
__lowerCamelCase : Union[str, Any] = 0
def __eq__( self: Optional[int] , a: List[Any] ):
return self.position == cell.position
def _snake_case ( self: Any ):
print(self.position )
class A_ :
'''simple docstring'''
def __init__( self: str , a: List[str]=(5, 5) ):
__lowerCamelCase : Optional[Any] = np.zeros(a )
__lowerCamelCase : List[str] = world_size[0]
__lowerCamelCase : Optional[int] = world_size[1]
def _snake_case ( self: List[Any] ):
print(self.w )
def _snake_case ( self: Optional[int] , a: str ):
__lowerCamelCase : Tuple = [
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
__lowerCamelCase : Optional[int] = cell.position[0]
__lowerCamelCase : List[str] = cell.position[1]
__lowerCamelCase : Dict = []
for n in neughbour_cord:
__lowerCamelCase : Dict = current_x + n[0]
__lowerCamelCase : Optional[Any] = current_y + n[1]
if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
__lowerCamelCase : Optional[Any] = Cell()
__lowerCamelCase : Any = (x, y)
__lowerCamelCase : Dict = cell
neighbours.append(a )
return neighbours
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : str = []
__lowerCamelCase : int = []
_open.append(SCREAMING_SNAKE_CASE__ )
while _open:
__lowerCamelCase : Union[str, Any] = np.argmin([n.f for n in _open] )
__lowerCamelCase : int = _open[min_f]
_closed.append(_open.pop(SCREAMING_SNAKE_CASE__ ) )
if current == goal:
break
for n in world.get_neigbours(SCREAMING_SNAKE_CASE__ ):
for c in _closed:
if c == n:
continue
__lowerCamelCase : Optional[int] = current.g + 1
__lowerCamelCase , __lowerCamelCase : int = n.position
__lowerCamelCase , __lowerCamelCase : Tuple = goal.position
__lowerCamelCase : Dict = (ya - ya) ** 2 + (xa - xa) ** 2
__lowerCamelCase : str = n.h + n.g
for c in _open:
if c == n and c.f < n.f:
continue
_open.append(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : Optional[int] = []
while current.parent is not None:
path.append(current.position )
__lowerCamelCase : int = current.parent
path.append(current.position )
return path[::-1]
if __name__ == "__main__":
lowercase_ = Gridworld()
# Start position and goal
lowercase_ = Cell()
lowercase_ = (0, 0)
lowercase_ = Cell()
lowercase_ = (4, 4)
print(F"""path from {start.position} to {goal.position}""")
lowercase_ = astar(world, start, goal)
# Just for visual reasons.
for i in s:
lowercase_ = 1
print(world.w)
| 669 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
_snake_case : Union[str, Any] = R'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. 
Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'
@add_start_docstrings(__UpperCamelCase )
class _UpperCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
a_ = """rag"""
a_ = True
def __init__( self : Dict , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : List[Any]=" / " , lowerCAmelCase_ : Dict=" // " , lowerCAmelCase_ : List[str]=5 , lowerCAmelCase_ : Optional[int]=3_0_0 , lowerCAmelCase_ : str=7_6_8 , lowerCAmelCase_ : Tuple=8 , lowerCAmelCase_ : List[Any]="wiki_dpr" , lowerCAmelCase_ : Any="train" , lowerCAmelCase_ : Optional[Any]="compressed" , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : Tuple=False , lowerCAmelCase_ : Optional[int]=False , lowerCAmelCase_ : str=0.0 , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Optional[int]=False , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : List[str]=False , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : Tuple=None , **lowerCAmelCase_ : List[str] , ) -> List[Any]:
super().__init__(
bos_token_id=lowerCAmelCase_ , pad_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , decoder_start_token_id=lowerCAmelCase_ , forced_eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , prefix=lowerCAmelCase_ , vocab_size=lowerCAmelCase_ , **lowerCAmelCase_ , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
__lowerCAmelCase = kwargs.pop('question_encoder' )
__lowerCAmelCase = question_encoder_config.pop('model_type' )
__lowerCAmelCase = kwargs.pop('generator' )
__lowerCAmelCase = decoder_config.pop('model_type' )
from ..auto.configuration_auto import AutoConfig
__lowerCAmelCase = AutoConfig.for_model(lowerCAmelCase_ , **lowerCAmelCase_ )
__lowerCAmelCase = AutoConfig.for_model(lowerCAmelCase_ , **lowerCAmelCase_ )
__lowerCAmelCase = reduce_loss
__lowerCAmelCase = label_smoothing
__lowerCAmelCase = exclude_bos_score
__lowerCAmelCase = do_marginalize
__lowerCAmelCase = title_sep
__lowerCAmelCase = doc_sep
__lowerCAmelCase = n_docs
__lowerCAmelCase = max_combined_length
__lowerCAmelCase = dataset
__lowerCAmelCase = dataset_split
__lowerCAmelCase = index_name
__lowerCAmelCase = retrieval_vector_size
__lowerCAmelCase = retrieval_batch_size
__lowerCAmelCase = passages_path
__lowerCAmelCase = index_path
__lowerCAmelCase = use_dummy_dataset
__lowerCAmelCase = output_retrieved
__lowerCAmelCase = do_deduplication
__lowerCAmelCase = use_cache
if self.forced_eos_token_id is None:
__lowerCAmelCase = getattr(self.generator , 'forced_eos_token_id' , lowerCAmelCase_ )
@classmethod
def lowercase ( cls : Dict , lowerCAmelCase_ : PretrainedConfig , lowerCAmelCase_ : PretrainedConfig , **lowerCAmelCase_ : Union[str, Any] ) -> Dict:
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **lowerCAmelCase_ )
def lowercase ( self : Tuple ) -> Optional[int]:
__lowerCAmelCase = copy.deepcopy(self.__dict__ )
__lowerCAmelCase = self.question_encoder.to_dict()
__lowerCAmelCase = self.generator.to_dict()
__lowerCAmelCase = self.__class__.model_type
return output
| 53 |
import math
from datetime import datetime, timedelta
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : Tuple = year % 19
__lowerCamelCase : int = year % 4
__lowerCamelCase : Any = year % 7
__lowerCamelCase : Dict = math.floor(year / 100 )
__lowerCamelCase : str = math.floor((13 + 8 * leap_day_inhibits) / 25 )
__lowerCamelCase : Optional[int] = leap_day_inhibits / 4
__lowerCamelCase : str = (
15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 30
__lowerCamelCase : Optional[Any] = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
__lowerCamelCase : Optional[int] = (19 * metonic_cycle + secular_moon_shift) % 30
# PHM -> Paschal Full Moon
__lowerCamelCase : Tuple = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 29 and days_from_phm_to_sunday == 6:
return datetime(SCREAMING_SNAKE_CASE__ , 4 , 19 )
elif days_to_add == 28 and days_from_phm_to_sunday == 6:
return datetime(SCREAMING_SNAKE_CASE__ , 4 , 18 )
else:
return datetime(SCREAMING_SNAKE_CASE__ , 3 , 22 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
lowercase_ = 'will be' if year > datetime.now().year else 'was'
print(F"""Easter in {year} {tense} {gauss_easter(year)}""")
| 669 | 0 |
'''simple docstring'''
from PIL import Image
def UpperCAmelCase_ (__a : Tuple , __a : Optional[int] ):
"""simple docstring"""
def brightness(__a : Any ) -> float:
return 1_2_8 + level + (c - 1_2_8)
if not -255.0 <= level <= 255.0:
raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
return img.point(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change brightness to 100
__lowerCAmelCase = change_brightness(img, 1_0_0)
brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
| 229 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class A_ ( __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__snake_case = 1
@register_to_config
def __init__( self: str , a: str=2000 , a: List[str]=0.1 , a: Any=20 , a: Dict=1e-3 ):
__lowerCamelCase : Dict = None
__lowerCamelCase : Any = None
__lowerCamelCase : Optional[int] = None
def _snake_case ( self: int , a: str , a: Union[str, torch.device] = None ):
__lowerCamelCase : int = torch.linspace(1 , self.config.sampling_eps , a , device=a )
def _snake_case ( self: List[Any] , a: Union[str, Any] , a: Tuple , a: Optional[Any] , a: Dict=None ):
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
__lowerCamelCase : Tuple = (
-0.2_5 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
__lowerCamelCase : Optional[int] = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
__lowerCamelCase : Optional[Any] = std.flatten()
while len(std.shape ) < len(score.shape ):
__lowerCamelCase : List[str] = std.unsqueeze(-1 )
__lowerCamelCase : Any = -score / std
# compute
__lowerCamelCase : List[Any] = -1.0 / len(self.timesteps )
__lowerCamelCase : Any = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
__lowerCamelCase : Dict = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
__lowerCamelCase : int = beta_t.unsqueeze(-1 )
__lowerCamelCase : Any = -0.5 * beta_t * x
__lowerCamelCase : List[Any] = torch.sqrt(a )
__lowerCamelCase : Tuple = drift - diffusion**2 * score
__lowerCamelCase : str = x + drift * dt
# add noise
__lowerCamelCase : Any = randn_tensor(x.shape , layout=x.layout , generator=a , device=x.device , dtype=x.dtype )
__lowerCamelCase : Any = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self: Optional[int] ):
return self.config.num_train_timesteps
| 669 | 0 |
"""simple docstring"""
# Function to print upper half of diamond (pyramid)
def lowerCamelCase_ (UpperCamelCase__ : Dict ):
for i in range(0 , SCREAMING_SNAKE_CASE__ ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(''' ''' , end='''''' )
for _ in range(0 , i + 1 ): # printing stars
print('''* ''' , end='''''' )
print()
def lowerCamelCase_ (UpperCamelCase__ : List[Any] ):
for i in range(SCREAMING_SNAKE_CASE__ , 0 , -1 ):
for _ in range(SCREAMING_SNAKE_CASE__ , 0 , -1 ): # printing stars
print('''* ''' , end='''''' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(''' ''' , end='''''' )
def lowerCamelCase_ (UpperCamelCase__ : Optional[Any] ):
if n <= 0:
print(''' ... .... nothing printing :(''' )
return
floyd(SCREAMING_SNAKE_CASE__ ) # upper half
reverse_floyd(SCREAMING_SNAKE_CASE__ ) # lower half
if __name__ == "__main__":
print(R'| /\ | |- | |- |--| |\ /| |-')
print(R'|/ \| |- |_ |_ |__| | \/ | |_')
_lowerCAmelCase :Optional[Any] = 1
while K:
_lowerCAmelCase :Union[str, Any] = int(input('enter the number and , and see the magic : '))
print()
pretty_print(user_number)
_lowerCAmelCase :Optional[Any] = int(input('press 0 to exit... and 1 to continue...'))
print('Good Bye...')
| 506 |
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : str = int(SCREAMING_SNAKE_CASE__ )
if n_element < 1:
__lowerCamelCase : str = ValueError('a should be a positive number' )
raise my_error
__lowerCamelCase : Tuple = [1]
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[Any] = (0, 0, 0)
__lowerCamelCase : Any = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
lowercase_ = input('Enter the last number (nth term) of the Hamming Number Series: ')
print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
lowercase_ = hamming(int(n))
print('-----------------------------------------------------')
print(F"""The list with nth numbers is: {hamming_numbers}""")
print('-----------------------------------------------------')
| 669 | 0 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if (
(cp >= 0X4e00 and cp <= 0X9fff)
or (cp >= 0X3400 and cp <= 0X4dbf) #
or (cp >= 0X2_0000 and cp <= 0X2_a6df) #
or (cp >= 0X2_a700 and cp <= 0X2_b73f) #
or (cp >= 0X2_b740 and cp <= 0X2_b81f) #
or (cp >= 0X2_b820 and cp <= 0X2_ceaf) #
or (cp >= 0Xf900 and cp <= 0Xfaff)
or (cp >= 0X2_f800 and cp <= 0X2_fa1f) #
): #
return True
return False
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
for char in word:
__lowerCamelCase : List[str] =ord(SCREAMING_SNAKE_CASE__ )
if not _is_chinese_char(SCREAMING_SNAKE_CASE__ ):
return 0
return 1
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
__lowerCamelCase : Dict =set()
for token in tokens:
__lowerCamelCase : Optional[int] =len(SCREAMING_SNAKE_CASE__ ) > 1 and is_chinese(SCREAMING_SNAKE_CASE__ )
if chinese_word:
word_set.add(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : Dict =list(SCREAMING_SNAKE_CASE__ )
return word_list
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
__lowerCamelCase : Tuple =max([len(SCREAMING_SNAKE_CASE__ ) for w in chinese_word_set] )
__lowerCamelCase : List[str] =bert_tokens
__lowerCamelCase : Optional[int] =0, len(SCREAMING_SNAKE_CASE__ )
while start < end:
__lowerCamelCase : Dict =True
if is_chinese(bert_word[start] ):
__lowerCamelCase : Union[str, Any] =min(end - start , SCREAMING_SNAKE_CASE__ )
for i in range(SCREAMING_SNAKE_CASE__ , 1 , -1 ):
__lowerCamelCase : List[str] =''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
__lowerCamelCase : Optional[int] ='##' + bert_word[j]
__lowerCamelCase : Dict =start + i
__lowerCamelCase : Dict =False
break
if single_word:
start += 1
return bert_word
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase : List[Any] =[]
for i in range(0 , len(SCREAMING_SNAKE_CASE__ ) , 100 ):
__lowerCamelCase : List[Any] =ltp_tokenizer.seg(lines[i : i + 100] )[0]
__lowerCamelCase : Tuple =[get_chinese_word(SCREAMING_SNAKE_CASE__ ) for r in res]
ltp_res.extend(SCREAMING_SNAKE_CASE__ )
assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : Optional[Any] =[]
for i in range(0 , len(SCREAMING_SNAKE_CASE__ ) , 100 ):
__lowerCamelCase : Any =bert_tokenizer(lines[i : i + 100] , add_special_tokens=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=512 )
bert_res.extend(res['''input_ids'''] )
assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : str =[]
for input_ids, chinese_word in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : Union[str, Any] =[]
for id in input_ids:
__lowerCamelCase : Union[str, Any] =bert_tokenizer._convert_id_to_token(SCREAMING_SNAKE_CASE__ )
input_tokens.append(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : Tuple =add_sub_symbol(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : str =[]
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(SCREAMING_SNAKE_CASE__ ):
if token[:2] == "##":
__lowerCamelCase : str =token[2:]
# save chinese tokens' pos
if len(SCREAMING_SNAKE_CASE__ ) == 1 and _is_chinese_char(ord(SCREAMING_SNAKE_CASE__ ) ):
ref_id.append(SCREAMING_SNAKE_CASE__ )
ref_ids.append(SCREAMING_SNAKE_CASE__ )
assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ )
return ref_ids
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f:
__lowerCamelCase : Union[str, Any] =f.readlines()
__lowerCamelCase : Optional[int] =[line.strip() for line in data if len(SCREAMING_SNAKE_CASE__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
__lowerCamelCase : List[Any] =LTP(args.ltp ) # faster in GPU device
__lowerCamelCase : Optional[Any] =BertTokenizer.from_pretrained(args.bert )
__lowerCamelCase : Dict =prepare_ref(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f:
__lowerCamelCase : Any =[json.dumps(SCREAMING_SNAKE_CASE__ ) + '\n' for ref in ref_ids]
f.writelines(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path'
)
parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer')
parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res')
_UpperCamelCase = parser.parse_args()
main(args)
| 179 |
import unittest
from knapsack import greedy_knapsack as kp
class A_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self: List[Any] ):
__lowerCamelCase : str = [10, 20, 30, 40, 50, 60]
__lowerCamelCase : List[str] = [2, 4, 6, 8, 10, 12]
__lowerCamelCase : Tuple = 100
self.assertEqual(kp.calc_profit(a , a , a ) , 210 )
def _snake_case ( self: str ):
self.assertRaisesRegex(a , 'max_weight must greater than zero.' )
def _snake_case ( self: List[str] ):
self.assertRaisesRegex(a , 'Weight can not be negative.' )
def _snake_case ( self: Dict ):
self.assertRaisesRegex(a , 'Profit can not be negative.' )
def _snake_case ( self: List[str] ):
self.assertRaisesRegex(a , 'max_weight must greater than zero.' )
def _snake_case ( self: Any ):
self.assertRaisesRegex(
a , 'The length of profit and weight must be same.' )
if __name__ == "__main__":
unittest.main()
| 669 | 0 |
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCAmelCase ( __UpperCamelCase , unittest.TestCase ):
    """CodeGen tokenizer test suite (slow and fast), driven by the tokenizer test mixin.

    NOTE(review): this file looks machine-renamed. All five class attributes
    below assign to the same name ``__A`` (each overwriting the last), distinct
    locals collapse onto ``lowerCAmelCase_``, and many statements read names
    that are never assigned as written (``kwargs``, ``tokens``, ``tokenizer``,
    ``out_s`` …) or reference the undefined ``_lowerCamelCase`` where literal
    argument values used to be. Code is preserved byte-for-byte; confirm
    against the upstream CodeGen tokenizer tests before executing.
    """

    # Mixin configuration (tokenizer classes + common-test switches).
    # NOTE(review): every assignment targets ``__A``, so only the last survives.
    __A : Tuple = CodeGenTokenizer
    __A : Dict = CodeGenTokenizerFast
    __A : List[Any] = True
    __A : Any = {'add_prefix_space': True}
    __A : Optional[Any] = False

    def UpperCAmelCase_ ( self ):
        """Write a tiny BPE vocab + merges fixture into the test tmpdir."""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        lowerCAmelCase_ = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            '\u0120',
            '\u0120l',
            '\u0120n',
            '\u0120lo',
            '\u0120low',
            'er',
            '\u0120lowest',
            '\u0120newer',
            '\u0120wider',
            '<unk>',
            '<|endoftext|>',
        ]
        # NOTE(review): `_lowerCamelCase` is not defined in this method — the
        # original presumably zipped the vocab list above; NameError as written.
        lowerCAmelCase_ = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
        lowerCAmelCase_ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        lowerCAmelCase_ = {'unk_token': '<unk>'}
        lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        # NOTE(review): `self.vocab_file` / `self.merges_file` are read but never
        # assigned here — the paths built above were presumably meant for self.
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(_lowerCamelCase ) )

    def UpperCAmelCase_ ( self , **_lowerCamelCase ):
        """Build a slow CodeGen tokenizer from the tmpdir fixtures."""
        # NOTE(review): `kwargs` is undefined — the keyword parameter was
        # renamed to `_lowerCamelCase`.
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase )

    def UpperCAmelCase_ ( self , **_lowerCamelCase ):
        """Build a fast CodeGen tokenizer from the tmpdir fixtures."""
        # NOTE(review): `kwargs` is undefined — same renaming artifact as above.
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCamelCase )

    def UpperCAmelCase_ ( self , _lowerCamelCase ):
        """Return the (input_text, output_text) pair used by common round-trip tests."""
        lowerCAmelCase_ = 'lower newer'
        lowerCAmelCase_ = 'lower newer'
        # NOTE(review): `input_text` / `output_text` are never assigned as written.
        return input_text, output_text

    def UpperCAmelCase_ ( self ):
        """Tokenize a simple string with the slow tokenizer; check tokens and ids."""
        lowerCAmelCase_ = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        lowerCAmelCase_ = 'lower newer'
        lowerCAmelCase_ = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
        lowerCAmelCase_ = tokenizer.tokenize(_lowerCamelCase , add_prefix_space=_lowerCamelCase )
        self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
        lowerCAmelCase_ = tokens + [tokenizer.unk_token]
        lowerCAmelCase_ = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , _lowerCamelCase )

    def UpperCAmelCase_ ( self ):
        """Check that slow and fast tokenizers agree on tokens, ids and unk handling."""
        if not self.test_rust_tokenizer:
            return
        lowerCAmelCase_ = self.get_tokenizer()
        lowerCAmelCase_ = self.get_rust_tokenizer(add_prefix_space=_lowerCamelCase )
        lowerCAmelCase_ = 'lower newer'
        # Testing tokenization
        lowerCAmelCase_ = tokenizer.tokenize(_lowerCamelCase , add_prefix_space=_lowerCamelCase )
        lowerCAmelCase_ = rust_tokenizer.tokenize(_lowerCamelCase )
        self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
        # Testing conversion to ids without special tokens
        lowerCAmelCase_ = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase , add_prefix_space=_lowerCamelCase )
        lowerCAmelCase_ = rust_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
        self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
        # Testing conversion to ids with special tokens
        lowerCAmelCase_ = self.get_rust_tokenizer(add_prefix_space=_lowerCamelCase )
        lowerCAmelCase_ = tokenizer.encode(_lowerCamelCase , add_prefix_space=_lowerCamelCase )
        lowerCAmelCase_ = rust_tokenizer.encode(_lowerCamelCase )
        self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
        # Testing the unknown token
        lowerCAmelCase_ = tokens + [rust_tokenizer.unk_token]
        lowerCAmelCase_ = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , _lowerCamelCase )

    def UpperCAmelCase_ ( self , *_lowerCamelCase , **_lowerCamelCase ):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass

    def UpperCAmelCase_ ( self , _lowerCamelCase=15 ):
        """Padding without a pad token must raise for encode / encode_plus / batch_encode_plus."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
                # Simple input
                lowerCAmelCase_ = 'This is a simple input'
                lowerCAmelCase_ = ['This is a simple input 1', 'This is a simple input 2']
                lowerCAmelCase_ = ('This is a simple input', 'This is a pair')
                lowerCAmelCase_ = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                self.assertRaises(_lowerCamelCase , tokenizer_r.encode , _lowerCamelCase , max_length=_lowerCamelCase , padding='''max_length''' )
                # Simple input
                self.assertRaises(_lowerCamelCase , tokenizer_r.encode_plus , _lowerCamelCase , max_length=_lowerCamelCase , padding='''max_length''' )
                # Simple input
                self.assertRaises(
                    _lowerCamelCase , tokenizer_r.batch_encode_plus , _lowerCamelCase , max_length=_lowerCamelCase , padding='''max_length''' , )
                # Pair input
                self.assertRaises(_lowerCamelCase , tokenizer_r.encode , _lowerCamelCase , max_length=_lowerCamelCase , padding='''max_length''' )
                # Pair input
                self.assertRaises(_lowerCamelCase , tokenizer_r.encode_plus , _lowerCamelCase , max_length=_lowerCamelCase , padding='''max_length''' )
                # Pair input
                self.assertRaises(
                    _lowerCamelCase , tokenizer_r.batch_encode_plus , _lowerCamelCase , max_length=_lowerCamelCase , padding='''max_length''' , )

    def UpperCAmelCase_ ( self ):
        """Exercise max_length and automatic padding with an explicit <pad> token."""
        lowerCAmelCase_ = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
        # Simple input
        lowerCAmelCase_ = 'This is a simple input'
        lowerCAmelCase_ = ['This is a simple input looooooooong', 'This is a simple input']
        lowerCAmelCase_ = ('This is a simple input', 'This is a pair')
        lowerCAmelCase_ = [
            ('This is a simple input loooooong', 'This is a simple input'),
            ('This is a simple pair loooooong', 'This is a simple pair'),
        ]
        lowerCAmelCase_ = tokenizer.pad_token_id
        lowerCAmelCase_ = tokenizer(_lowerCamelCase , padding='''max_length''' , max_length=30 , return_tensors='''np''' )
        lowerCAmelCase_ = tokenizer(_lowerCamelCase , padding=_lowerCamelCase , truncate=_lowerCamelCase , return_tensors='''np''' )
        lowerCAmelCase_ = tokenizer(*_lowerCamelCase , padding='''max_length''' , max_length=60 , return_tensors='''np''' )
        lowerCAmelCase_ = tokenizer(_lowerCamelCase , padding=_lowerCamelCase , truncate=_lowerCamelCase , return_tensors='''np''' )
        # s
        # test single string max_length padding
        self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 )
        self.assertTrue(pad_token_id in out_s['''input_ids'''] )
        self.assertTrue(0 in out_s['''attention_mask'''] )
        # s2
        # test automatic padding
        self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 )
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
        self.assertFalse(0 in out_sa['''attention_mask'''][0] )
        # short slice does have padding
        self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
        self.assertTrue(0 in out_sa['''attention_mask'''][1] )
        # p
        # test single pair max_length padding
        self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 )
        self.assertTrue(pad_token_id in out_p['''input_ids'''] )
        self.assertTrue(0 in out_p['''attention_mask'''] )
        # p2
        # test automatic padding pair
        self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 )
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
        self.assertFalse(0 in out_pa['''attention_mask'''][0] )
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
        self.assertTrue(0 in out_pa['''attention_mask'''][1] )

    def UpperCAmelCase_ ( self ):
        """A custom bos_token must be prepended to every encoding and survive decoding."""
        lowerCAmelCase_ = '$$$'
        lowerCAmelCase_ = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=_lowerCamelCase , add_bos_token=_lowerCamelCase )
        lowerCAmelCase_ = 'This is a simple input'
        lowerCAmelCase_ = ['This is a simple input 1', 'This is a simple input 2']
        lowerCAmelCase_ = tokenizer.bos_token_id
        lowerCAmelCase_ = tokenizer(_lowerCamelCase )
        lowerCAmelCase_ = tokenizer(_lowerCamelCase )
        self.assertEqual(out_s.input_ids[0] , _lowerCamelCase )
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
        lowerCAmelCase_ = tokenizer.decode(out_s.input_ids )
        lowerCAmelCase_ = tokenizer.batch_decode(out_sa.input_ids )
        self.assertEqual(decode_s.split()[0] , _lowerCamelCase )
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )

    @slow
    def UpperCAmelCase_ ( self ):
        """Integration: decode with truncate_before_pattern should stop at comments / blank runs."""
        lowerCAmelCase_ = CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''' )
        lowerCAmelCase_ = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'
        lowerCAmelCase_ = '\nif len_a > len_b: result = a\nelse: result = b'
        lowerCAmelCase_ = tokenizer.encode(_lowerCamelCase )
        lowerCAmelCase_ = ['^#', re.escape('''<|endoftext|>''' ), '^\'\'\'', '^"""', '\n\n\n']
        lowerCAmelCase_ = tokenizer.decode(_lowerCamelCase , truncate_before_pattern=_lowerCamelCase )
        self.assertEqual(_lowerCamelCase , _lowerCamelCase )

    def UpperCAmelCase_ ( self ):
        # Intentionally disabled in this suite.
        pass
| 274 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A_ :
    """Config and random-input builder for the TF LayoutLMv3 model tests.

    NOTE(review): machine-renamed — ``__init__`` declares every parameter as
    ``a`` (duplicate parameter names are not valid Python), locals collapse
    onto ``__lowerCamelCase`` and call arguments onto the bare ``a``, while
    later statements read the *original* names (``bbox``, ``config`` …).
    Preserved byte-for-byte; reconcile with the upstream
    TFLayoutLMv3ModelTester before executing.
    """

    def __init__( self: Optional[int] , a: Union[str, Any] , a: Union[str, Any]=2 , a: str=3 , a: Any=4 , a: Union[str, Any]=2 , a: Tuple=7 , a: int=True , a: Tuple=True , a: List[str]=True , a: Union[str, Any]=True , a: str=99 , a: Tuple=36 , a: int=2 , a: Dict=4 , a: Union[str, Any]=37 , a: List[str]="gelu" , a: List[Any]=0.1 , a: Optional[int]=0.1 , a: Dict=512 , a: Union[str, Any]=16 , a: str=2 , a: int=0.0_2 , a: Optional[Any]=6 , a: Optional[int]=6 , a: Dict=3 , a: Optional[Any]=4 , a: Optional[Any]=None , a: Dict=1000 , ):
        # Store every hyper-parameter used by the builders below.
        __lowerCamelCase : List[str] = parent
        __lowerCamelCase : Optional[Any] = batch_size
        __lowerCamelCase : Optional[int] = num_channels
        __lowerCamelCase : str = image_size
        __lowerCamelCase : int = patch_size
        __lowerCamelCase : List[str] = is_training
        __lowerCamelCase : Dict = use_input_mask
        __lowerCamelCase : Any = use_token_type_ids
        __lowerCamelCase : List[str] = use_labels
        __lowerCamelCase : str = vocab_size
        __lowerCamelCase : List[Any] = hidden_size
        __lowerCamelCase : List[Any] = num_hidden_layers
        __lowerCamelCase : Any = num_attention_heads
        __lowerCamelCase : List[Any] = intermediate_size
        __lowerCamelCase : List[Any] = hidden_act
        __lowerCamelCase : Any = hidden_dropout_prob
        __lowerCamelCase : Optional[int] = attention_probs_dropout_prob
        __lowerCamelCase : Dict = max_position_embeddings
        __lowerCamelCase : Tuple = type_vocab_size
        __lowerCamelCase : int = type_sequence_label_size
        __lowerCamelCase : List[str] = initializer_range
        __lowerCamelCase : List[str] = coordinate_size
        __lowerCamelCase : int = shape_size
        __lowerCamelCase : Union[str, Any] = num_labels
        __lowerCamelCase : int = num_choices
        __lowerCamelCase : int = scope
        __lowerCamelCase : Any = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        __lowerCamelCase : Any = text_seq_length
        __lowerCamelCase : Optional[Any] = (image_size // patch_size) ** 2 + 1
        __lowerCamelCase : Any = self.text_seq_length + self.image_seq_length

    def _snake_case ( self: List[str] ):
        """Build a config plus random ids, legal bboxes, pixel values, masks and labels."""
        __lowerCamelCase : Any = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        __lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        __lowerCamelCase : int = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    # Swap so y1 <= y2 (swap via a temporary in the original).
                    __lowerCamelCase : List[str] = bbox[i, j, 3]
                    __lowerCamelCase : str = bbox[i, j, 1]
                    __lowerCamelCase : Dict = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    # Swap so x1 <= x2.
                    __lowerCamelCase : Tuple = bbox[i, j, 2]
                    __lowerCamelCase : Any = bbox[i, j, 0]
                    __lowerCamelCase : List[str] = tmp_coordinate
        __lowerCamelCase : str = tf.constant(a )
        __lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __lowerCamelCase : Any = None
        if self.use_input_mask:
            __lowerCamelCase : int = random_attention_mask([self.batch_size, self.text_seq_length] )
        __lowerCamelCase : Tuple = None
        if self.use_token_type_ids:
            __lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        __lowerCamelCase : Dict = None
        __lowerCamelCase : Union[str, Any] = None
        if self.use_labels:
            __lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __lowerCamelCase : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        __lowerCamelCase : Dict = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def _snake_case ( self: Tuple , a: List[Any] , a: Any , a: List[str] , a: Dict , a: Optional[Any] , a: Dict ):
        """Build the base model; check hidden-state shapes for text+image, text-only, image-only."""
        __lowerCamelCase : Optional[Any] = TFLayoutLMvaModel(config=a )

        # text + image
        __lowerCamelCase : Optional[Any] = model(a , pixel_values=a , training=a )
        __lowerCamelCase : int = model(
            a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , training=a , )
        __lowerCamelCase : List[Any] = model(a , bbox=a , pixel_values=a , training=a )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

        # text only
        __lowerCamelCase : List[Any] = model(a , training=a )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )

        # image only
        __lowerCamelCase : Optional[Any] = model({'pixel_values': pixel_values} , training=a )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )

    def _snake_case ( self: Dict , a: Dict , a: Optional[Any] , a: int , a: Optional[int] , a: List[str] , a: List[str] , a: List[str] ):
        """Sequence-classification head: logits must be (batch_size, num_labels)."""
        __lowerCamelCase : List[str] = self.num_labels
        __lowerCamelCase : str = TFLayoutLMvaForSequenceClassification(config=a )
        __lowerCamelCase : int = model(
            a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , labels=a , training=a , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def _snake_case ( self: Optional[int] , a: Union[str, Any] , a: Union[str, Any] , a: Dict , a: Optional[Any] , a: Tuple , a: Optional[Any] , a: List[Any] ):
        """Token-classification head: logits must be (batch_size, text_seq_length, num_labels)."""
        __lowerCamelCase : Union[str, Any] = self.num_labels
        __lowerCamelCase : Any = TFLayoutLMvaForTokenClassification(config=a )
        __lowerCamelCase : Optional[Any] = model(
            a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , labels=a , training=a , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )

    def _snake_case ( self: Dict , a: Optional[Any] , a: str , a: Dict , a: Union[str, Any] , a: List[Any] , a: Optional[int] , a: List[str] ):
        """QA head: start/end logits must each be (batch_size, seq_length)."""
        __lowerCamelCase : List[Any] = 2
        __lowerCamelCase : Any = TFLayoutLMvaForQuestionAnswering(config=a )
        __lowerCamelCase : Any = model(
            a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , start_positions=a , end_positions=a , training=a , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def _snake_case ( self: List[Any] ):
        """Return (config, inputs_dict) for the common test mixin."""
        __lowerCamelCase : str = self.prepare_config_and_inputs()
        ((__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase)) : List[Any] = config_and_inputs
        __lowerCamelCase : Tuple = {
            'input_ids': input_ids,
            'bbox': bbox,
            'pixel_values': pixel_values,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_tf
class A_ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
    """Common-mixin test suite for the TF LayoutLMv3 model family.

    NOTE(review): machine-renamed like the tester above — call sites pass the
    bare, undefined name ``a``; all three boolean switches assign to the same
    name ``__snake_case``; and the annotated, parenthesized tuple-unpacking
    statements below are not valid Python syntax. Preserved byte-for-byte.
    """

    # Model classes and pipeline mapping exercised by the common tests.
    __snake_case = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    __snake_case = (
        {"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    # Common-test switches. NOTE(review): all three target the same name.
    __snake_case = False
    __snake_case = False
    __snake_case = False

    def _snake_case ( self: int , a: List[str] , a: Any , a: Optional[Any] , a: Tuple , a: Tuple ):
        # Unconditionally True; the predicate this replaced (likely a pipeline
        # test filter) is not visible here — NOTE(review).
        return True

    def _snake_case ( self: str , a: Any , a: Any , a: Optional[int]=False ):
        """Deep-copy inputs_dict and add task-appropriate dummy labels per model class."""
        __lowerCamelCase : List[str] = copy.deepcopy(a )
        if model_class in get_values(a ):
            # Tile each tensor across the num_choices axis (multiple-choice shape).
            __lowerCamelCase : Tuple = {
                k: tf.tile(tf.expand_dims(a , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
                if isinstance(a , tf.Tensor ) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(a ):
                __lowerCamelCase : Any = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(a ):
                __lowerCamelCase : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
                __lowerCamelCase : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(a ):
                __lowerCamelCase : str = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(a ):
                __lowerCamelCase : Dict = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
        return inputs_dict

    def _snake_case ( self: Tuple ):
        """Instantiate the model tester and the config tester."""
        __lowerCamelCase : int = TFLayoutLMvaModelTester(self )
        __lowerCamelCase : str = ConfigTester(self , config_class=a , hidden_size=37 )

    def _snake_case ( self: Union[str, Any] ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def _snake_case ( self: Union[str, Any] ):
        """hf_compute_loss must yield a per-sample loss for kwargs / dict / tuple / masked-label calls."""
        __lowerCamelCase , __lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowerCamelCase : int = model_class(a )
            if getattr(a , 'hf_compute_loss' , a ):
                # The number of elements in the loss should be the same as the number of elements in the label
                __lowerCamelCase : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                __lowerCamelCase : int = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=a )[0]
                ]
                __lowerCamelCase : Dict = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                __lowerCamelCase : Optional[int] = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                __lowerCamelCase : Dict = prepared_for_class.pop('input_ids' )
                __lowerCamelCase : str = model(a , **a )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss when we mask some positions
                __lowerCamelCase : List[Any] = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                __lowerCamelCase : List[str] = prepared_for_class.pop('input_ids' )
                if "labels" in prepared_for_class:
                    __lowerCamelCase : int = prepared_for_class['labels'].numpy()
                    if len(labels.shape ) > 1 and labels.shape[1] != 1:
                        # -100 is the ignore index for the loss.
                        __lowerCamelCase : Tuple = -100
                        __lowerCamelCase : Tuple = tf.convert_to_tensor(a )
                        __lowerCamelCase : Tuple = model(a , **a )[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                        self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
                # Test that model correctly compute the loss with a dict
                __lowerCamelCase : int = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                __lowerCamelCase : str = model(a )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss with a tuple
                __lowerCamelCase : str = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                # Get keys that were added with the _prepare_for_class function
                __lowerCamelCase : Optional[Any] = prepared_for_class.keys() - inputs_dict.keys()
                __lowerCamelCase : List[Any] = inspect.signature(model.call ).parameters
                __lowerCamelCase : List[str] = list(signature.keys() )
                # Create a dictionary holding the location of the tensors in the tuple
                __lowerCamelCase : Optional[int] = {0: 'input_ids'}
                for label_key in label_keys:
                    __lowerCamelCase : Dict = signature_names.index(a )
                    __lowerCamelCase : str = label_key
                __lowerCamelCase : List[str] = sorted(tuple_index_mapping.items() )
                # Initialize a list with their default values, update the values and convert to a tuple
                __lowerCamelCase : Optional[int] = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default )
                for index, value in sorted_tuple_index_mapping:
                    __lowerCamelCase : Optional[int] = prepared_for_class[value]
                __lowerCamelCase : Any = tuple(a )
                # Send to model
                __lowerCamelCase : int = model(tuple_input[:-1] )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )

    def _snake_case ( self: List[str] ):
        """Base-model forward-pass check over the tester's random inputs."""
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(a , a , a , a , a , a )

    def _snake_case ( self: int ):
        """Base-model check across all position-embedding types."""
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : str = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            __lowerCamelCase : Union[str, Any] = type
            self.model_tester.create_and_check_model(a , a , a , a , a , a )

    def _snake_case ( self: Dict ):
        """Sequence-classification head check."""
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            a , a , a , a , a , a , a )

    def _snake_case ( self: str ):
        """Token-classification head check."""
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            a , a , a , a , a , a , a )

    def _snake_case ( self: str ):
        """Question-answering head check."""
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            a , a , a , a , a , a , a )

    @slow
    def _snake_case ( self: int ):
        """Smoke-test loading the first pretrained checkpoint from the hub."""
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowerCamelCase : Dict = TFLayoutLMvaModel.from_pretrained(a )
            self.assertIsNotNone(a )
def UpperCamelCase__ ( ):
    """Load and return the COCO cats fixture image used by the integration tests.

    Bug fix: the original assigned the opened image to a throwaway local
    (`__lowerCamelCase`) but returned the undefined name `image`, which raised
    NameError on every call. Return the opened image directly instead.
    """
    return Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
@require_tf
class A_ ( unittest.TestCase ):
    """Slow integration test: one forward pass of microsoft/layoutlmv3-base.

    NOTE(review): machine-renamed — arguments are the undefined bare name
    ``a`` and result reads (``model``, ``outputs`` …) target names that are
    never assigned as written. Preserved byte-for-byte.
    """

    @cached_property
    def _snake_case ( self: Optional[int] ):
        # Image processor for the test; NOTE(review): `a` is undefined here —
        # presumably the original passed apply_ocr=False; confirm upstream.
        return LayoutLMvaImageProcessor(apply_ocr=a ) if is_vision_available() else None

    @slow
    def _snake_case ( self: Optional[Any] ):
        """Run the base model on the fixture image; compare hidden states to reference values."""
        __lowerCamelCase : Tuple = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )
        __lowerCamelCase : Union[str, Any] = self.default_image_processor
        __lowerCamelCase : List[Any] = prepare_img()
        __lowerCamelCase : str = image_processor(images=a , return_tensors='tf' ).pixel_values
        __lowerCamelCase : Union[str, Any] = tf.constant([[1, 2]] )
        __lowerCamelCase : str = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )

        # forward pass
        __lowerCamelCase : int = model(input_ids=a , bbox=a , pixel_values=a , training=a )

        # verify the logits
        __lowerCamelCase : Optional[int] = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape , a )
        __lowerCamelCase : Any = tf.constant(
            [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , a , atol=1e-4 ) )
| 669 | 0 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
# NOTE(review): module-level flag passed throughout the nightly tests below
# where a torch device / literal argument values are expected (e.g.
# `pipe.to(A)`) — looks like a renaming artifact (presumably the original
# `torch_device`); confirm before relying on it.
A = False
class __snake_case ( unittest.TestCase):
    # Intentionally empty placeholder for the fast (non-nightly) suite; the
    # real coverage lives in the nightly, GPU-gated class in this module.
    pass
@nightly
@require_torch_gpu
class __snake_case ( unittest.TestCase):
    """Nightly GPU integration tests for the VersatileDiffusion pipeline.

    NOTE(review): machine-renamed — locals collapse onto ``lowerCamelCase``
    while later statements read the original names (``pipe``, ``image`` …),
    and the module-level ``A`` (False) is passed where devices / literal
    values are expected. Preserved byte-for-byte.
    """

    def UpperCAmelCase_ ( self ):
        """Release GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCAmelCase_ ( self ):
        """dual_guided output must be identical after a save_pretrained/from_pretrained round trip."""
        lowerCamelCase : Any = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.floataa )
        pipe.to(A )
        pipe.set_progress_bar_config(disable=A )
        lowerCamelCase : str = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        lowerCamelCase : Dict = torch.manual_seed(0 )
        lowerCamelCase : List[str] = pipe.dual_guided(
            prompt='first prompt', image=A, text_to_image_strength=0.75, generator=A, guidance_scale=7.5, num_inference_steps=2, output_type='numpy', ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(A )
            lowerCamelCase : List[Any] = VersatileDiffusionPipeline.from_pretrained(A, torch_dtype=torch.floataa )
        pipe.to(A )
        pipe.set_progress_bar_config(disable=A )
        lowerCamelCase : Union[str, Any] = generator.manual_seed(0 )
        lowerCamelCase : Dict = pipe.dual_guided(
            prompt='first prompt', image=A, text_to_image_strength=0.75, generator=A, guidance_scale=7.5, num_inference_steps=2, output_type='numpy', ).images
        assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"

    def UpperCAmelCase_ ( self ):
        """Exercise dual_guided, text_to_image and image_variation against reference pixel slices."""
        lowerCamelCase : Union[str, Any] = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.floataa )
        pipe.to(A )
        pipe.set_progress_bar_config(disable=A )
        lowerCamelCase : str = 'cyberpunk 2077'
        lowerCamelCase : Any = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        lowerCamelCase : str = torch.manual_seed(0 )
        lowerCamelCase : Optional[Any] = pipe.dual_guided(
            prompt=A, image=A, text_to_image_strength=0.75, generator=A, guidance_scale=7.5, num_inference_steps=50, output_type='numpy', ).images
        lowerCamelCase : Any = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        lowerCamelCase : int = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
        lowerCamelCase : Optional[Any] = 'A painting of a squirrel eating a burger '
        lowerCamelCase : Union[str, Any] = torch.manual_seed(0 )
        lowerCamelCase : Dict = pipe.text_to_image(
            prompt=A, generator=A, guidance_scale=7.5, num_inference_steps=50, output_type='numpy' ).images
        lowerCamelCase : str = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        lowerCamelCase : List[str] = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
        lowerCamelCase : List[str] = pipe.image_variation(A, generator=A, output_type='numpy' ).images
        lowerCamelCase : List[Any] = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        lowerCamelCase : str = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
| 320 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = CLIPTokenizer
__snake_case = CLIPTokenizerFast
__snake_case = True
__snake_case = {}
__snake_case = False
def _snake_case ( self: Union[str, Any] ):
super().setUp()
# fmt: off
__lowerCamelCase : Any = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
__lowerCamelCase : Tuple = dict(zip(a , range(len(a ) ) ) )
__lowerCamelCase : List[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
__lowerCamelCase : Tuple = {'unk_token': '<unk>'}
__lowerCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__lowerCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(a ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(a ) )
def _snake_case ( self: Tuple , **a: Union[str, Any] ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **a )
def _snake_case ( self: Union[str, Any] , **a: List[str] ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **a )
def _snake_case ( self: Optional[int] , a: List[Any] ):
__lowerCamelCase : Tuple = 'lower newer'
__lowerCamelCase : Tuple = 'lower newer'
return input_text, output_text
def _snake_case ( self: List[str] ):
__lowerCamelCase : List[Any] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__lowerCamelCase : Optional[Any] = 'lower newer'
__lowerCamelCase : int = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
__lowerCamelCase : Optional[int] = tokenizer.tokenize(a )
self.assertListEqual(a , a )
__lowerCamelCase : int = tokens + [tokenizer.unk_token]
__lowerCamelCase : int = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a )
@require_ftfy
def _snake_case ( self: Union[str, Any] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__lowerCamelCase : List[Any] = self.tokenizer_class.from_pretrained(a , **a )
__lowerCamelCase : int = self.rust_tokenizer_class.from_pretrained(a , **a )
__lowerCamelCase : str = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
__lowerCamelCase : Optional[Any] = tokenizer_s.tokenize(a )
__lowerCamelCase : Optional[Any] = tokenizer_r.tokenize(a )
self.assertListEqual(a , a )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
__lowerCamelCase : List[Any] = 'xa\u0303y' + ' ' + 'x\xe3y'
__lowerCamelCase : Tuple = tokenizer_s.tokenize(a )
__lowerCamelCase : Any = tokenizer_r.tokenize(a )
self.assertListEqual(a , a )
# Test that the tokenization is identical on unicode of space type
__lowerCamelCase : List[Any] = [
'\u0009', # (horizontal tab, '\t')
'\u000B', # (vertical tab)
'\u000C', # (form feed)
'\u0020', # (space, ' ')
'\u200E', # (left-to-right mark):w
'\u200F', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
__lowerCamelCase : List[Any] = tokenizer_s.tokenize(a )
__lowerCamelCase : Optional[int] = tokenizer_r.tokenize(a )
self.assertListEqual(a , a )
# Test that the tokenization is identical on unicode of line break type
__lowerCamelCase : str = [
'\u000A', # (line feed, '\n')
'\r\n', # (carriage return and line feed, '\r\n')
'\u000D', # (carriage return, '\r')
'\r', # (carriage return, '\r')
'\u000D', # (carriage return, '\r')
'\u2028', # (line separator)
'\u2029', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
__lowerCamelCase : Dict = tokenizer_s.tokenize(a )
__lowerCamelCase : List[str] = tokenizer_r.tokenize(a )
self.assertListEqual(a , a )
def _snake_case ( self: List[Any] ):
    """Check that fast-tokenizer offset mappings honor `add_prefix_space`.

    NOTE(review): locals are bound to `__lowerCamelCase` but read back under
    other names (`text_of_1_token`, `text`, `encoding`), and `a` is used both
    as positional value and keyword value — obfuscation artifacts; the test
    as written cannot run. Documented, not changed.
    """
    # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
            __lowerCamelCase : Optional[int] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
            __lowerCamelCase : Optional[int] = F'{text_of_1_token} {text_of_1_token}'
            __lowerCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(
                a , use_fast=a , )
            __lowerCamelCase : Any = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
            # Without a leading space the first token starts at offset 0 and
            # the second starts one past the separating space.
            self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) )
            self.assertEqual(
                encoding.offset_mapping[1] , (len(a ) + 1, len(a ) + 1 + len(a )) , )
            # Same text with a leading space: every offset shifts right by one.
            __lowerCamelCase : List[Any] = F' {text}'
            __lowerCamelCase : str = self.rust_tokenizer_class.from_pretrained(
                a , use_fast=a , )
            __lowerCamelCase : Any = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
            self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(a )) )
            self.assertEqual(
                encoding.offset_mapping[1] , (1 + len(a ) + 1, 1 + len(a ) + 1 + len(a )) , )
def _snake_case ( self: str ):
    """Loading a pre-v4.17 CLIP tokenizer must fail with a clear message.

    NOTE(review): `a` inside `assertRaises(a)` is undefined in this scope —
    presumably ValueError in the original; confirm before relying on this.
    """
    # Test related to the breaking change introduced in transformers v4.17.0
    # We need to check that an error in raised when the user try to load a previous version of the tokenizer.
    with self.assertRaises(a ) as context:
        self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
    self.assertTrue(
        context.exception.args[0].startswith(
            'The `backend_tokenizer` provided does not match the expected format.' ) )
@require_ftfy
def _snake_case ( self: Tuple ):
    """Re-run the slow/fast tokenizer equivalence check with ftfy installed."""
    super().test_tokenization_python_rust_equals()
def _snake_case ( self: Tuple ):
    """Intentionally skipped: CLIP lower-cases input, so a casing test is moot."""
    # CLIP always lower cases letters
    pass
| 669 | 0 |
def A__ (snake_case : bytes ) -> str:
    """Encode a bytes-like object as an uppercase base16 (hex) string.

    Each byte becomes exactly two uppercase hex digits, per the base16
    alphabet of RFC 4648 / RFC 3548 section 6.

    Fixes: the original body referenced an undefined name
    (`SCREAMING_SNAKE_CASE__`) instead of the parameter and applied
    `hex()` to the whole argument rather than each byte.
    """
    # hex(byte) yields e.g. '0xf' -> strip '0x', left-pad to 2 digits, uppercase.
    return "".join(hex(byte)[2:].zfill(2).upper() for byte in snake_case)
def A__ (snake_case : str ) -> bytes:
    """Decode an uppercase base16 (hex) string back into bytes.

    Validates the input per RFC 3548: the digit count must be even (whole
    bytes) and only the uppercase alphabet 0-9A-F is accepted (section 6).
    Raises ValueError on invalid input.

    Fixes: the original body referenced undefined names
    (`SCREAMING_SNAKE_CASE__`, `data`) instead of the parameter.
    """
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(snake_case) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:\nData does not have an even number of hex digits.""" )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(snake_case) <= set("""0123456789ABCDEF""" ):
        raise ValueError(
            """Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.""" )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(snake_case[i] + snake_case[i + 1] , 16 ) for i in range(0 , len(snake_case) , 2 ) )
if __name__ == "__main__":
    # Run this module's doctests when executed directly as a script.
    import doctest
    doctest.testmod()
| 279 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
# Detect whether we are running inside Google Colab; default to False when
# the availability probe itself cannot be imported.
# NOTE(review): the probe result is stored in `lowercase_`, yet later code
# reads `in_colab` — obfuscation artifact; confirm the intended binding.
lowercase_ = False
try:
    lowercase_ = _is_package_available('google.colab')
except ModuleNotFoundError:
    pass
@input.register
class A_ :
    """Interactive terminal selector: renders `choices`, moves a highlighted
    cursor with the arrow / digit keys, and returns the selected index on
    enter (plain integer input when running under Colab).

    NOTE(review): throughout this class results are bound to
    `__lowerCamelCase` but read back under other names (`self.position`,
    `self.choices`, `in_colab`, `default_choice`, `index`, `movement`,
    `choice`), and several handlers reference an undefined `a` —
    obfuscation artifacts; the class as written cannot run. Documented,
    not changed.
    """

    def __init__( self: int , a: str = None , a: list = [] ):
        # Cursor position, the list of selectable choices, the prompt text,
        # and a platform-dependent arrow glyph.
        __lowerCamelCase : Dict = 0
        __lowerCamelCase : Dict = choices
        __lowerCamelCase : Tuple = prompt
        if sys.platform == "win32":
            __lowerCamelCase : Union[str, Any] = '*'
        else:
            __lowerCamelCase : Any = '➔ '

    def _snake_case ( self: Any , a: Tuple , a: str = "" ):
        # Render one choice; colorized green (ANSI code 32) off-Windows.
        if sys.platform != "win32":
            writeColor(self.choices[index] , 32 , a )
        else:
            forceWrite(self.choices[index] , a )

    def _snake_case ( self: Tuple , a: int ):
        # Print a choice line, prefixed with the arrow when it is current.
        if index == self.position:
            forceWrite(F' {self.arrow_char} ' )
            self.write_choice(a )
        else:
            forceWrite(F' {self.choices[index]}' )
        reset_cursor()

    def _snake_case ( self: Optional[int] , a: Direction , a: int = 1 ):
        # Move the highlight up/down, clamped at the list edges; redraw the
        # previously highlighted line and the new one.
        __lowerCamelCase : str = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices ):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(a )
        move_cursor(a , direction.name )
        self.print_choice(self.position )

    @input.mark(KEYMAP['up'] )
    def _snake_case ( self: Tuple ):
        self.move_direction(Direction.UP )

    @input.mark(KEYMAP['down'] )
    def _snake_case ( self: Optional[int] ):
        self.move_direction(Direction.DOWN )

    @input.mark(KEYMAP['newline'] )
    def _snake_case ( self: str ):
        # Enter: park the cursor below the menu and report the selection.
        move_cursor(len(self.choices ) - self.position , 'DOWN' )
        return self.position

    @input.mark(KEYMAP['interrupt'] )
    def _snake_case ( self: Union[str, Any] ):
        # Ctrl-C: restore the cursor position, then propagate the interrupt.
        move_cursor(len(self.choices ) - self.position , 'DOWN' )
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(a )] for number in range(10 )] )
    def _snake_case ( self: str ):
        # Digit keys: jump straight to the typed index when it is in range.
        __lowerCamelCase : List[Any] = int(chr(self.current_selection ) )
        __lowerCamelCase : Any = index - self.position
        if index == self.position:
            return
        if index < len(self.choices ):
            if self.position > index:
                self.move_direction(Direction.UP , -movement )
            elif self.position < index:
                self.move_direction(Direction.DOWN , a )
            else:
                return
        else:
            return

    def _snake_case ( self: str , a: int = 0 ):
        # Main loop: draw the prompt and all choices, then consume input
        # until a selection is confirmed.
        if self.prompt:
            linebreak()
            forceWrite(self.prompt , '\n' )
            if in_colab:
                forceWrite('Please input a choice index (starting from 0), and press enter' , '\n' )
            else:
                forceWrite('Please select a choice using the arrow or number keys, and selecting with enter' , '\n' )
        __lowerCamelCase : Dict = default_choice
        for i in range(len(self.choices ) ):
            self.print_choice(a )
            forceWrite('\n' )
        move_cursor(len(self.choices ) - self.position , 'UP' )
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        __lowerCamelCase : Any = int(builtins.input() )
                    except ValueError:
                        __lowerCamelCase : str = default_choice
                else:
                    __lowerCamelCase : Optional[int] = self.handle_input()
                if choice is not None:
                    # Erase the rendered menu before returning.
                    reset_cursor()
                    for _ in range(len(self.choices ) + 1 ):
                        move_cursor(1 , 'UP' )
                        clear_line()
                    self.write_choice(a , '\n' )
                    return choice
| 669 | 0 |
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class __UpperCamelCase ( __UpperCamelCase ):
    """Processor wrapping a SamImageProcessor: preprocesses images and
    normalizes user-supplied prompt points / labels / boxes from the
    original image frame to the model's resized (longest-edge) frame.

    NOTE(review): the class inherits from its own obfuscated name — the
    imports suggest ProcessorMixin was the intended base; confirm. Locals
    are bound to `_UpperCAmelCase` but read back under other names
    (`encoding_image_processor`, `original_sizes`, `coords`, ...) —
    obfuscation artifacts; documented, not changed.
    """

    # ProcessorMixin wiring: attribute list and image-processor class name.
    __A : int = ["""image_processor"""]
    __A : str = """SamImageProcessor"""

    def __init__( self , _UpperCamelCase ):
        super().__init__(_UpperCamelCase )
        _UpperCAmelCase = self.image_processor
        # Sentinel used when padding variable-length point lists.
        _UpperCAmelCase = -10
        _UpperCAmelCase = self.image_processor.size['longest_edge']

    def __call__( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase = None , **_UpperCamelCase , ):
        # Run the image processor, then validate and rescale the prompts.
        _UpperCAmelCase = self.image_processor(
            _UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase , )
        # pop arguments that are not used in the foward but used nevertheless
        _UpperCAmelCase = encoding_image_processor['original_sizes']
        if hasattr(_UpperCamelCase , '''numpy''' ): # Checks if Torch or TF tensor
            _UpperCAmelCase = original_sizes.numpy()
        _UpperCAmelCase = self._check_and_preprocess_points(
            input_points=_UpperCamelCase , input_labels=_UpperCamelCase , input_boxes=_UpperCamelCase , )
        _UpperCAmelCase = self._normalize_and_convert(
            _UpperCamelCase , _UpperCamelCase , input_points=_UpperCamelCase , input_labels=_UpperCamelCase , input_boxes=_UpperCamelCase , return_tensors=_UpperCamelCase , )
        return encoding_image_processor

    def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase="pt" , ):
        # Rescale points/boxes per image, pad ragged point lists, convert to
        # the requested tensor framework, and merge into the encoding.
        if input_points is not None:
            if len(_UpperCamelCase ) != len(_UpperCamelCase ):
                # Single original size shared by all point lists.
                _UpperCAmelCase = [
                    self._normalize_coordinates(self.target_size , _UpperCamelCase , original_sizes[0] ) for point in input_points
                ]
            else:
                _UpperCAmelCase = [
                    self._normalize_coordinates(self.target_size , _UpperCamelCase , _UpperCamelCase )
                    for point, original_size in zip(_UpperCamelCase , _UpperCamelCase )
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points ):
                if input_labels is not None:
                    _UpperCAmelCase = self._pad_points_and_labels(_UpperCamelCase , _UpperCamelCase )
            _UpperCAmelCase = np.array(_UpperCamelCase )
        if input_labels is not None:
            _UpperCAmelCase = np.array(_UpperCamelCase )
        if input_boxes is not None:
            if len(_UpperCamelCase ) != len(_UpperCamelCase ):
                _UpperCAmelCase = [
                    self._normalize_coordinates(self.target_size , _UpperCamelCase , original_sizes[0] , is_bounding_box=_UpperCamelCase )
                    for box in input_boxes
                ]
            else:
                _UpperCAmelCase = [
                    self._normalize_coordinates(self.target_size , _UpperCamelCase , _UpperCamelCase , is_bounding_box=_UpperCamelCase )
                    for box, original_size in zip(_UpperCamelCase , _UpperCamelCase )
                ]
            _UpperCAmelCase = np.array(_UpperCamelCase )
        if input_boxes is not None:
            if return_tensors == "pt":
                _UpperCAmelCase = torch.from_numpy(_UpperCamelCase )
                # boxes batch size of 1 by default
                _UpperCAmelCase = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
            elif return_tensors == "tf":
                _UpperCAmelCase = tf.convert_to_tensor(_UpperCamelCase )
                # boxes batch size of 1 by default
                _UpperCAmelCase = tf.expand_dims(_UpperCamelCase , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
            encoding_image_processor.update({'''input_boxes''': input_boxes} )
        if input_points is not None:
            if return_tensors == "pt":
                _UpperCAmelCase = torch.from_numpy(_UpperCamelCase )
                # point batch size of 1 by default
                _UpperCAmelCase = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
            elif return_tensors == "tf":
                _UpperCAmelCase = tf.convert_to_tensor(_UpperCamelCase )
                # point batch size of 1 by default
                _UpperCAmelCase = tf.expand_dims(_UpperCamelCase , 1 ) if len(input_points.shape ) != 4 else input_points
            encoding_image_processor.update({'''input_points''': input_points} )
        if input_labels is not None:
            if return_tensors == "pt":
                _UpperCAmelCase = torch.from_numpy(_UpperCamelCase )
                # point batch size of 1 by default
                _UpperCAmelCase = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
            elif return_tensors == "tf":
                _UpperCAmelCase = tf.convert_to_tensor(_UpperCamelCase )
                # point batch size of 1 by default
                _UpperCAmelCase = tf.expand_dims(_UpperCamelCase , 1 ) if len(input_labels.shape ) != 3 else input_labels
            encoding_image_processor.update({'''input_labels''': input_labels} )
        return encoding_image_processor

    def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase ):
        # Pad every point array (and its labels) up to the longest list,
        # using the point-pad sentinel set in __init__.
        _UpperCAmelCase = max([point.shape[0] for point in input_points] )
        _UpperCAmelCase = []
        for i, point in enumerate(_UpperCamelCase ):
            if point.shape[0] != expected_nb_points:
                _UpperCAmelCase = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
                _UpperCAmelCase = np.append(input_labels[i] , [self.point_pad_value] )
            processed_input_points.append(_UpperCamelCase )
        _UpperCAmelCase = processed_input_points
        return input_points, input_labels

    def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=False ):
        # Rescale coordinates from the original image size to the resized
        # longest-edge frame; boxes are treated as two corner points.
        _UpperCAmelCase = original_size
        _UpperCAmelCase = self.image_processor._get_preprocess_shape(_UpperCamelCase , longest_edge=_UpperCamelCase )
        _UpperCAmelCase = deepcopy(_UpperCamelCase ).astype(_UpperCamelCase )
        if is_bounding_box:
            _UpperCAmelCase = coords.reshape(-1 , 2 , 2 )
        _UpperCAmelCase = coords[..., 0] * (new_w / old_w)
        _UpperCAmelCase = coords[..., 1] * (new_h / old_h)
        if is_bounding_box:
            _UpperCAmelCase = coords.reshape(-1 , 4 )
        return coords

    def UpperCamelCase( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , ):
        # Validate raw user prompts (nested lists or tensors) and convert
        # them to lists of numpy arrays; raises ValueError on bad shapes.
        if input_points is not None:
            if hasattr(_UpperCamelCase , '''numpy''' ): # Checks for TF or Torch tensor
                _UpperCAmelCase = input_points.numpy().tolist()
            if not isinstance(_UpperCamelCase , _UpperCamelCase ) or not isinstance(input_points[0] , _UpperCamelCase ):
                raise ValueError('''Input points must be a list of list of floating points.''' )
            _UpperCAmelCase = [np.array(_UpperCamelCase ) for input_point in input_points]
        else:
            _UpperCAmelCase = None
        if input_labels is not None:
            if hasattr(_UpperCamelCase , '''numpy''' ):
                _UpperCAmelCase = input_labels.numpy().tolist()
            if not isinstance(_UpperCamelCase , _UpperCamelCase ) or not isinstance(input_labels[0] , _UpperCamelCase ):
                raise ValueError('''Input labels must be a list of list integers.''' )
            _UpperCAmelCase = [np.array(_UpperCamelCase ) for label in input_labels]
        else:
            _UpperCAmelCase = None
        if input_boxes is not None:
            if hasattr(_UpperCamelCase , '''numpy''' ):
                _UpperCAmelCase = input_boxes.numpy().tolist()
            if (
                not isinstance(_UpperCamelCase , _UpperCamelCase )
                or not isinstance(input_boxes[0] , _UpperCamelCase )
                or not isinstance(input_boxes[0][0] , _UpperCamelCase )
            ):
                raise ValueError('''Input boxes must be a list of list of list of floating points.''' )
            _UpperCAmelCase = [np.array(_UpperCamelCase ).astype(np.floataa ) for box in input_boxes]
        else:
            _UpperCAmelCase = None
        return input_points, input_labels, input_boxes

    @property
    def UpperCamelCase( self ):
        # De-duplicated model input names, order preserved.
        _UpperCAmelCase = self.image_processor.model_input_names
        return list(dict.fromkeys(_UpperCamelCase ) )

    def UpperCamelCase( self , *_UpperCamelCase , **_UpperCamelCase ):
        # Delegates mask post-processing to the wrapped image processor.
        # NOTE(review): the trailing "| 32 |" below is dataset-row junk fused
        # into the line during extraction; the statement cannot parse as-is.
        return self.image_processor.post_process_masks(*_UpperCamelCase , **_UpperCamelCase ) | 32 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( __UpperCamelCase ):
    """Unit tests for CMStochasticIterativeScheduler (consistency models).

    NOTE(review): locals are bound to `__lowerCamelCase` but read back under
    other names (`config`, `scheduler`, `sample`, `result_sum`, ...), and
    `a` is frequently undefined — obfuscation artifacts; the tests as
    written cannot run. Documented, not changed.
    """

    __snake_case = (CMStochasticIterativeScheduler,)
    __snake_case = 10

    def _snake_case ( self: Any , **a: Dict ):
        # Default scheduler config; keyword overrides are merged in.
        __lowerCamelCase : Optional[Any] = {
            'num_train_timesteps': 201,
            'sigma_min': 0.0_0_2,
            'sigma_max': 8_0.0,
        }
        config.update(**a )
        return config

    def _snake_case ( self: List[Any] ):
        # Two consecutive steps must preserve the sample shape.
        __lowerCamelCase : Any = 10
        __lowerCamelCase : Any = self.get_scheduler_config()
        __lowerCamelCase : Union[str, Any] = self.scheduler_classes[0](**a )
        scheduler.set_timesteps(a )
        __lowerCamelCase : Any = scheduler.timesteps[0]
        __lowerCamelCase : List[str] = scheduler.timesteps[1]
        __lowerCamelCase : Union[str, Any] = self.dummy_sample
        __lowerCamelCase : int = 0.1 * sample
        __lowerCamelCase : Optional[Any] = scheduler.step(a , a , a ).prev_sample
        __lowerCamelCase : List[str] = scheduler.step(a , a , a ).prev_sample
        self.assertEqual(output_a.shape , sample.shape )
        self.assertEqual(output_a.shape , output_a.shape )

    def _snake_case ( self: Optional[Any] ):
        # Sweep several training-timestep counts through the common checker.
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=a )

    def _snake_case ( self: List[str] ):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=a )

    def _snake_case ( self: Tuple ):
        # Full denoising loop with the default schedule; compares the output
        # tensor statistics against recorded reference values.
        __lowerCamelCase : Tuple = self.scheduler_classes[0]
        __lowerCamelCase : Tuple = self.get_scheduler_config()
        __lowerCamelCase : Tuple = scheduler_class(**a )
        __lowerCamelCase : int = 1
        scheduler.set_timesteps(a )
        __lowerCamelCase : Optional[int] = scheduler.timesteps
        __lowerCamelCase : List[str] = torch.manual_seed(0 )
        __lowerCamelCase : Union[str, Any] = self.dummy_model()
        __lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(a ):
            # 1. scale model input
            __lowerCamelCase : List[str] = scheduler.scale_model_input(a , a )
            # 2. predict noise residual
            __lowerCamelCase : Optional[int] = model(a , a )
            # 3. predict previous sample x_t-1
            __lowerCamelCase : str = scheduler.step(a , a , a , generator=a ).prev_sample
            __lowerCamelCase : str = pred_prev_sample
        __lowerCamelCase : List[str] = torch.sum(torch.abs(a ) )
        __lowerCamelCase : str = torch.mean(torch.abs(a ) )
        assert abs(result_sum.item() - 1_9_2.7_6_1_4 ) < 1e-2
        assert abs(result_mean.item() - 0.2_5_1_0 ) < 1e-3

    def _snake_case ( self: Optional[Any] ):
        # Same denoising loop with an explicit custom [106, 0] schedule.
        __lowerCamelCase : Any = self.scheduler_classes[0]
        __lowerCamelCase : Optional[Any] = self.get_scheduler_config()
        __lowerCamelCase : int = scheduler_class(**a )
        __lowerCamelCase : List[Any] = [106, 0]
        scheduler.set_timesteps(timesteps=a )
        __lowerCamelCase : Dict = scheduler.timesteps
        __lowerCamelCase : int = torch.manual_seed(0 )
        __lowerCamelCase : Any = self.dummy_model()
        __lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
        for t in timesteps:
            # 1. scale model input
            __lowerCamelCase : Tuple = scheduler.scale_model_input(a , a )
            # 2. predict noise residual
            __lowerCamelCase : Tuple = model(a , a )
            # 3. predict previous sample x_t-1
            __lowerCamelCase : Any = scheduler.step(a , a , a , generator=a ).prev_sample
            __lowerCamelCase : Any = pred_prev_sample
        __lowerCamelCase : Dict = torch.sum(torch.abs(a ) )
        __lowerCamelCase : Optional[Any] = torch.mean(torch.abs(a ) )
        assert abs(result_sum.item() - 3_4_7.6_3_5_7 ) < 1e-2
        assert abs(result_mean.item() - 0.4_5_2_7 ) < 1e-3

    def _snake_case ( self: Tuple ):
        # Custom timesteps that are not strictly descending must be rejected.
        __lowerCamelCase : Optional[int] = self.scheduler_classes[0]
        __lowerCamelCase : int = self.get_scheduler_config()
        __lowerCamelCase : List[Any] = scheduler_class(**a )
        __lowerCamelCase : Optional[Any] = [39, 30, 12, 15, 0]
        with self.assertRaises(a , msg='`timesteps` must be in descending order.' ):
            scheduler.set_timesteps(timesteps=a )

    def _snake_case ( self: int ):
        # Passing both num_inference_steps and timesteps must be rejected.
        __lowerCamelCase : Any = self.scheduler_classes[0]
        __lowerCamelCase : Union[str, Any] = self.get_scheduler_config()
        __lowerCamelCase : Union[str, Any] = scheduler_class(**a )
        __lowerCamelCase : Optional[int] = [39, 30, 12, 1, 0]
        __lowerCamelCase : List[Any] = len(a )
        with self.assertRaises(a , msg='Can only pass one of `num_inference_steps` or `timesteps`.' ):
            scheduler.set_timesteps(num_inference_steps=a , timesteps=a )

    def _snake_case ( self: Optional[Any] ):
        # Timesteps at/above num_train_timesteps must be rejected.
        # NOTE(review): the msg below looks like it was meant to be an
        # f-string (missing `f` prefix, doubled closing brace) — confirm
        # against upstream before changing; left byte-identical here.
        __lowerCamelCase : Optional[int] = self.scheduler_classes[0]
        __lowerCamelCase : Dict = self.get_scheduler_config()
        __lowerCamelCase : Union[str, Any] = scheduler_class(**a )
        __lowerCamelCase : Optional[int] = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            a , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ):
            scheduler.set_timesteps(timesteps=a )
| 669 | 0 |
"""simple docstring"""
def a ( base : int , exponent : int ) -> int:
    """Return ``base ** exponent`` for a non-negative integer exponent.

    Computed by simple recursion: ``base * a(base, exponent - 1)``,
    bottoming out at 1 when ``exponent`` is 0. Negative exponents are NOT
    handled here (the ``__main__`` block inverts the result for those).

    Fixes: the original declared the same parameter name twice (a
    SyntaxError) and its body referenced the undefined names
    ``base``/``exponent``/``power`` — rewritten to self-recurse on
    properly named parameters.
    """
    return base * a(base , (exponent - 1) ) if exponent else 1
if __name__ == "__main__":
    # Interactive driver: read base and exponent from stdin and print the power.
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    # Fixes: the original called an undefined `power` (the module's function
    # is `a`) and bound every result to `__lowerCamelCase` while reading
    # `base`/`exponent`/`result`.
    result = a(base, abs(exponent))
    if exponent < 0:  # a() does not properly deal w/ negative exponents
        result = 1 / result
    print(f'''{base} to the power of {exponent} is {result}''')
| 608 |
from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
    # Download the og:image of a web page and save it with a timestamped name.
    # NOTE(review): every result is bound to `lowercase_` but read back as
    # `url` / `soup` / `image_url` / `image_data` / `file_name` —
    # obfuscation artifacts; the script as written cannot run. Also `bsa`
    # (imported above) is presumably a mangled `bs4` — confirm.
    lowercase_ = input('Enter image url: ').strip()
    print(F"""Downloading image from {url} ...""")
    lowercase_ = BeautifulSoup(requests.get(url).content, 'html.parser')
    # The image URL is in the content field of the first meta tag with property og:image
    lowercase_ = soup.find('meta', {'property': 'og:image'})['content']
    lowercase_ = requests.get(image_url).content
    # Timestamped output filename, e.g. 2024-01-31_12:00:00.jpg
    lowercase_ = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
    with open(file_name, 'wb') as fp:
        fp.write(image_data)
    print(F"""Done. Image saved to disk as {file_name}.""")
| 669 | 0 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
# Module-level logger.
# NOTE(review): bound to an obfuscated name but later code reads `logger` —
# confirm the intended binding.
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
@dataclass
class snake_case :
    """Data/preprocessing arguments for the XNLI training script.

    NOTE(review): the field names and type annotations are obfuscation
    artifacts; the `metadata["help"]` strings are the authoritative
    description of each field.
    """

    # Maximum tokenized sequence length.
    lowerCAmelCase__ :Dict = field(
        default=128 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    # Whether to ignore and rebuild any cached preprocessed datasets.
    lowerCAmelCase__ :Union[str, Any] = field(
        default=__UpperCamelCase , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
    # Static padding to max_seq_length vs. dynamic per-batch padding.
    lowerCAmelCase__ :str = field(
        default=__UpperCamelCase , metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        } , )
    # Optional cap on training examples (debugging / quick runs).
    lowerCAmelCase__ :int = field(
        default=__UpperCamelCase , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } , )
    # Optional cap on evaluation examples (debugging / quick runs).
    lowerCAmelCase__ :int = field(
        default=__UpperCamelCase , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        } , )
    # Optional cap on prediction examples (debugging / quick runs).
    lowerCAmelCase__ :Any = field(
        default=__UpperCamelCase , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        } , )
@dataclass
class snake_case :
    """Model/tokenizer arguments for the XNLI training script.

    NOTE(review): the field names and type annotations are obfuscation
    artifacts; the `metadata["help"]` strings are the authoritative
    description of each field.
    """

    lowerCAmelCase__ :Tuple = field(
        default=__UpperCamelCase , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    lowerCAmelCase__ :Optional[int] = field(
        default=__UpperCamelCase , metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."} )
    lowerCAmelCase__ :Optional[int] = field(
        default=__UpperCamelCase , metadata={"help": "Train language if it is different from the evaluation language."} )
    lowerCAmelCase__ :Dict = field(
        default=__UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    lowerCAmelCase__ :List[Any] = field(
        default=__UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    lowerCAmelCase__ :Tuple = field(
        default=__UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    lowerCAmelCase__ :int = field(
        default=__UpperCamelCase , metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"} , )
    lowerCAmelCase__ :Optional[Any] = field(
        default=__UpperCamelCase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
    lowerCAmelCase__ :List[Any] = field(
        default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
    lowerCAmelCase__ :int = field(
        default=__UpperCamelCase , metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        } , )
    lowerCAmelCase__ :Dict = field(
        default=__UpperCamelCase , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def lowerCamelCase ( ):
'''simple docstring'''
lowercase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowercase__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_xnli" ,SCREAMING_SNAKE_CASE__ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" ,datefmt="%m/%d/%Y %H:%M:%S" ,handlers=[logging.StreamHandler(sys.stdout )] ,)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase__ = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE__ )
datasets.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE__ )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
lowercase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
lowercase__ = load_dataset(
"xnli" ,model_args.language ,split="train" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
else:
lowercase__ = load_dataset(
"xnli" ,model_args.train_language ,split="train" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
lowercase__ = train_dataset.features['label'].names
if training_args.do_eval:
lowercase__ = load_dataset(
"xnli" ,model_args.language ,split="validation" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
lowercase__ = eval_dataset.features['label'].names
if training_args.do_predict:
lowercase__ = load_dataset(
"xnli" ,model_args.language ,split="test" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
lowercase__ = predict_dataset.features['label'].names
# Labels
lowercase__ = len(SCREAMING_SNAKE_CASE__ )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=SCREAMING_SNAKE_CASE__ ,idalabel={str(SCREAMING_SNAKE_CASE__ ): label for i, label in enumerate(SCREAMING_SNAKE_CASE__ )} ,labelaid={label: i for i, label in enumerate(SCREAMING_SNAKE_CASE__ )} ,finetuning_task="xnli" ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
lowercase__ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,do_lower_case=model_args.do_lower_case ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast_tokenizer ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
lowercase__ = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path ,from_tf=bool(".ckpt" in model_args.model_name_or_path ) ,config=SCREAMING_SNAKE_CASE__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,ignore_mismatched_sizes=model_args.ignore_mismatched_sizes ,)
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
lowercase__ = 'max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowercase__ = False
def preprocess_function(_snake_case : Union[str, Any] ):
# Tokenize the texts
return tokenizer(
examples["premise"] ,examples["hypothesis"] ,padding=SCREAMING_SNAKE_CASE__ ,max_length=data_args.max_seq_length ,truncation=SCREAMING_SNAKE_CASE__ ,)
if training_args.do_train:
if data_args.max_train_samples is not None:
lowercase__ = min(len(SCREAMING_SNAKE_CASE__ ) ,data_args.max_train_samples )
lowercase__ = train_dataset.select(range(SCREAMING_SNAKE_CASE__ ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
lowercase__ = train_dataset.map(
SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__ ,load_from_cache_file=not data_args.overwrite_cache ,desc="Running tokenizer on train dataset" ,)
# Log a few random samples from the training set:
for index in random.sample(range(len(SCREAMING_SNAKE_CASE__ ) ) ,3 ):
logger.info(f'''Sample {index} of the training set: {train_dataset[index]}.''' )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
lowercase__ = min(len(SCREAMING_SNAKE_CASE__ ) ,data_args.max_eval_samples )
lowercase__ = eval_dataset.select(range(SCREAMING_SNAKE_CASE__ ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
lowercase__ = eval_dataset.map(
SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__ ,load_from_cache_file=not data_args.overwrite_cache ,desc="Running tokenizer on validation dataset" ,)
if training_args.do_predict:
if data_args.max_predict_samples is not None:
lowercase__ = min(len(SCREAMING_SNAKE_CASE__ ) ,data_args.max_predict_samples )
lowercase__ = predict_dataset.select(range(SCREAMING_SNAKE_CASE__ ) )
with training_args.main_process_first(desc="prediction dataset map pre-processing" ):
lowercase__ = predict_dataset.map(
SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__ ,load_from_cache_file=not data_args.overwrite_cache ,desc="Running tokenizer on prediction dataset" ,)
# Get the metric function
lowercase__ = evaluate.load("xnli" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(_snake_case : Any ):
lowercase__ = p.predictions[0] if isinstance(p.predictions ,SCREAMING_SNAKE_CASE__ ) else p.predictions
lowercase__ = np.argmax(SCREAMING_SNAKE_CASE__ ,axis=1 )
return metric.compute(predictions=SCREAMING_SNAKE_CASE__ ,references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowercase__ = default_data_collator
elif training_args.fpaa:
lowercase__ = DataCollatorWithPadding(SCREAMING_SNAKE_CASE__ ,pad_to_multiple_of=8 )
else:
lowercase__ = None
# Initialize our Trainer
lowercase__ = Trainer(
model=SCREAMING_SNAKE_CASE__ ,args=SCREAMING_SNAKE_CASE__ ,train_dataset=train_dataset if training_args.do_train else None ,eval_dataset=eval_dataset if training_args.do_eval else None ,compute_metrics=SCREAMING_SNAKE_CASE__ ,tokenizer=SCREAMING_SNAKE_CASE__ ,data_collator=SCREAMING_SNAKE_CASE__ ,)
# Training
if training_args.do_train:
lowercase__ = None
if training_args.resume_from_checkpoint is not None:
lowercase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase__ = last_checkpoint
lowercase__ = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE__ )
lowercase__ = train_result.metrics
lowercase__ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(SCREAMING_SNAKE_CASE__ )
)
lowercase__ = min(SCREAMING_SNAKE_CASE__ ,len(SCREAMING_SNAKE_CASE__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train" ,SCREAMING_SNAKE_CASE__ )
trainer.save_metrics("train" ,SCREAMING_SNAKE_CASE__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
lowercase__ = trainer.evaluate(eval_dataset=SCREAMING_SNAKE_CASE__ )
lowercase__ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(SCREAMING_SNAKE_CASE__ )
lowercase__ = min(SCREAMING_SNAKE_CASE__ ,len(SCREAMING_SNAKE_CASE__ ) )
trainer.log_metrics("eval" ,SCREAMING_SNAKE_CASE__ )
trainer.save_metrics("eval" ,SCREAMING_SNAKE_CASE__ )
# Prediction
if training_args.do_predict:
logger.info("*** Predict ***" )
lowercase__ = trainer.predict(SCREAMING_SNAKE_CASE__ ,metric_key_prefix="predict" )
lowercase__ = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(SCREAMING_SNAKE_CASE__ )
)
lowercase__ = min(SCREAMING_SNAKE_CASE__ ,len(SCREAMING_SNAKE_CASE__ ) )
trainer.log_metrics("predict" ,SCREAMING_SNAKE_CASE__ )
trainer.save_metrics("predict" ,SCREAMING_SNAKE_CASE__ )
lowercase__ = np.argmax(SCREAMING_SNAKE_CASE__ ,axis=1 )
lowercase__ = os.path.join(training_args.output_dir ,"predictions.txt" )
if trainer.is_world_process_zero():
with open(SCREAMING_SNAKE_CASE__ ,"w" ) as writer:
writer.write("index\tprediction\n" )
for index, item in enumerate(SCREAMING_SNAKE_CASE__ ):
lowercase__ = label_list[item]
writer.write(f'''{index}\t{item}\n''' )
# Standard script entry point: run `main()` only when this file is executed
# directly, not when it is imported as a module.
if __name__ == "__main__":
    main()
| 267 |
# Repository hygiene checks: every tracked file must be lower-case, contain
# no spaces or hyphens, and live inside a directory. Exits with the number
# of offending files so CI fails when any check trips.
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

# Bug fix: the mangled version assigned every result to `lowercase_` while
# the checks below read `filepaths`, `upper_files`, ... (NameError).
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    # Non-zero exit code = number of bad files, so CI reports failure.
    sys.exit(bad_files)
| 669 | 0 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
lowercase : int = True
except ImportError:
lowercase : List[Any] = False
try:
from torch.hub import _get_torch_home
lowercase : List[str] = _get_torch_home()
except ImportError:
lowercase : Union[str, Any] = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))
)
lowercase : Optional[int] = os.path.join(torch_cache_home, 'transformers')
lowercase : Tuple = 'https://cdn.huggingface.co'
lowercase : Dict = 'https://s3.amazonaws.com/models.huggingface.co/bert'
lowercase : Optional[Any] = '/'.join(str(Path(__file__).resolve()).split('/')[:-1])
lowercase : Tuple = os.path.join(PATH, 'config.yaml')
lowercase : str = os.path.join(PATH, 'attributes.txt')
lowercase : str = os.path.join(PATH, 'objects.txt')
lowercase : Dict = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)
lowercase : Any = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE)
lowercase : List[str] = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE)
lowercase : int = 'pytorch_model.bin'
lowercase : Tuple = 'config.yaml'
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any=OBJECTS , _lowerCamelCase : List[str]=ATTRIBUTES) -> List[str]:
'''simple docstring'''
__UpperCamelCase : Optional[int] = []
with open(SCREAMING_SNAKE_CASE__) as f:
for object in f.readlines():
vg_classes.append(object.split(",")[0].lower().strip())
__UpperCamelCase : Optional[int] = []
with open(SCREAMING_SNAKE_CASE__) as f:
for object in f.readlines():
vg_attrs.append(object.split(",")[0].lower().strip())
return vg_classes, vg_attrs
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any) -> str:
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = OrderedDict()
with open(SCREAMING_SNAKE_CASE__ , "rb") as f:
__UpperCamelCase : Dict = pkl.load(SCREAMING_SNAKE_CASE__)['model']
for k in copy.deepcopy(list(ckp.keys())):
__UpperCamelCase : List[Any] = ckp.pop(SCREAMING_SNAKE_CASE__)
if isinstance(SCREAMING_SNAKE_CASE__ , np.ndarray):
__UpperCamelCase : List[Any] = torch.tensor(SCREAMING_SNAKE_CASE__)
else:
assert isinstance(SCREAMING_SNAKE_CASE__ , torch.tensor), type(SCREAMING_SNAKE_CASE__)
__UpperCamelCase : Dict = v
return r
class lowerCamelCase__ :
    """Attribute-style wrapper around a (possibly nested) configuration dict.

    Nested dicts are wrapped as child Config objects one level deeper, and
    dotted keys can be assigned through ``__setattr__``. Helpers dump to
    YAML/JSON, load from YAML, and build an instance from a pretrained
    model identifier.

    NOTE(review): this block is obfuscation-damaged and cannot run as
    written — several methods share the mangled name ``_lowerCamelCase``
    (later definitions shadow earlier ones at class-creation time),
    parameter lists repeat the name ``a`` (a SyntaxError), and locals were
    rebound to ``__UpperCamelCase`` while later lines still read the
    intended names (``name``, ``level``, ``dictionary``, ``val``,
    ``levels`` ...). Comments below describe the apparent intent; confirm
    against the upstream file.
    """
    # Class-level default for the attribute/value store.
    _A = {}
    def __init__( self :Union[str, Any] , a :dict , a :str = "root" , a :Union[str, Any]=0 ) -> int:
        # Apparent intent: record name/level, deep-copy each (key, value),
        # wrap nested dicts in Config children, mirror entries as attributes.
        __UpperCamelCase : int = name
        __UpperCamelCase : str = level
        __UpperCamelCase : str = {}
        for k, v in dictionary.items():
            if v is None:
                # None is not a legal configuration value.
                raise ValueError()
            __UpperCamelCase : str = copy.deepcopy(a )
            __UpperCamelCase : List[str] = copy.deepcopy(a )
            if isinstance(a , a ):
                # Nested dict -> child Config with incremented depth.
                __UpperCamelCase : Dict = Config(a , name=a , level=level + 1 )
            __UpperCamelCase : Optional[int] = v
            setattr(self , a , a )
        # Presumably stores the processed dict as ``self._pointer``.
        __UpperCamelCase : Union[str, Any] = d
    def __repr__( self :Optional[int] ) -> str:
        # Repr shows only the top-level keys of the backing store.
        return str(list((self._pointer.keys()) ) )
    def __setattr__( self :int , a :Tuple , a :Tuple ) -> Union[str, Any]:
        # Apparent intent: bind both the full dotted key and its leaf name,
        # then walk ``self._pointer`` to propagate the value into nested
        # Config children.
        __UpperCamelCase : Optional[Any] = val
        __UpperCamelCase : List[Any] = val
        __UpperCamelCase : int = key.split("." )
        __UpperCamelCase : str = len(a ) - 1
        __UpperCamelCase : Tuple = self._pointer
        if len(a ) > 1:
            for i, l in enumerate(a ):
                if hasattr(self , a ) and isinstance(getattr(self , a ) , a ):
                    setattr(getattr(self , a ) , ".".join(levels[i:] ) , a )
                if l == last_level:
                    __UpperCamelCase : List[Any] = val
                else:
                    __UpperCamelCase : Any = pointer[l]
    def _lowerCamelCase ( self :str ) -> Any:
        # Accessor for the raw backing dict (upstream name: ``to_dict``).
        return self._pointer
    def _lowerCamelCase ( self :Optional[int] , a :List[Any] , a :List[Any] ) -> Dict:
        # Serialize data to ``file_name`` as YAML (upstream: ``dump_yaml``).
        with open(f'{file_name}' , "w" ) as stream:
            dump(a , a )
    def _lowerCamelCase ( self :str , a :List[str] , a :Optional[int] ) -> Dict:
        # Serialize data to ``file_name`` as JSON (upstream: ``dump_json``).
        with open(f'{file_name}' , "w" ) as stream:
            json.dump(a , a )
    @staticmethod
    def _lowerCamelCase ( a :Optional[int] ) -> List[str]:
        # Parse a YAML config file (upstream: ``load_yaml``).
        with open(a ) as stream:
            __UpperCamelCase : Optional[int] = load(a , Loader=a )
        return data
    def __str__( self :int ) -> Optional[Any]:
        # Pretty-print the config tree, indenting one unit per nesting level.
        __UpperCamelCase : List[str] = ' '
        if self._name != "root":
            __UpperCamelCase : List[str] = f'{t * (self._level-1)}{self._name}:\n'
        else:
            __UpperCamelCase : List[str] = ''
        __UpperCamelCase : Union[str, Any] = self._level
        for i, (k, v) in enumerate(self._pointer.items() ):
            if isinstance(a , a ):
                r += f'{t * (self._level)}{v}\n'
                self._level += 1
            else:
                r += f'{t * (self._level)}{k}: {v} ({type(a ).__name__})\n'
            # Restore the depth after recursing into a child.
            __UpperCamelCase : Dict = level
        # Drop the trailing newline.
        return r[:-1]
    @classmethod
    def _lowerCamelCase ( cls :Union[str, Any] , a :str , **a :Optional[Any] ) -> Tuple:
        # Upstream ``from_pretrained``: resolve the config dict, then build.
        __UpperCamelCase : Optional[int] = cls.get_config_dict(a , **a )
        return cls(a )
    @classmethod
    def _lowerCamelCase ( cls :Dict , a :str , **a :Union[str, Any] ) -> List[str]:
        # Upstream ``get_config_dict``: locate the YAML config locally, at a
        # URL, or on the hub; download/cache it; parse it.
        __UpperCamelCase : Any = kwargs.pop("cache_dir" , a )
        __UpperCamelCase : List[Any] = kwargs.pop("force_download" , a )
        __UpperCamelCase : str = kwargs.pop("resume_download" , a )
        __UpperCamelCase : str = kwargs.pop("proxies" , a )
        __UpperCamelCase : Any = kwargs.pop("local_files_only" , a )
        if os.path.isdir(a ):
            __UpperCamelCase : List[str] = os.path.join(a , a )
        elif os.path.isfile(a ) or is_remote_url(a ):
            __UpperCamelCase : str = pretrained_model_name_or_path
        else:
            __UpperCamelCase : int = hf_bucket_url(a , filename=a , use_cdn=a )
        try:
            # Load from URL or cache if already cached
            __UpperCamelCase : Any = cached_path(
                a , cache_dir=a , force_download=a , proxies=a , resume_download=a , local_files_only=a , )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            __UpperCamelCase : str = Config.load_yaml(a )
        except EnvironmentError:
            __UpperCamelCase : List[Any] = 'Can\'t load config for'
            raise EnvironmentError(a )
        if resolved_config_file == config_file:
            print("loading configuration file from path" )
        else:
            print("loading configuration file cache" )
        return Config.load_yaml(a ), kwargs
def _SCREAMING_SNAKE_CASE(in_tensor):
    """Compare `in_tensor` against reference activations saved in ``dump.pt``.

    Debugging helper: loads the saved reference from the working directory,
    prints both arrays' shapes and leading values, asserts element-wise
    closeness, then raises unconditionally to halt execution.

    Raises:
        AssertionError: when the tensors differ beyond rtol/atol.
        Exception: always, after a successful comparison.
    """
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    # Bug fix: the mangled version bound both arrays to the same name, so
    # the second print/compare never saw the reference tensor. The saved
    # reference is batched; compare against its first element.
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False]) / len(n1.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")
# Hugging face functions below
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any]) -> Dict:
'''simple docstring'''
__UpperCamelCase : Tuple = urlparse(SCREAMING_SNAKE_CASE__)
return parsed.scheme in ("http", "https")
def _SCREAMING_SNAKE_CASE(model_id, filename, use_cdn=True):
    """Build the download URL for `filename` within hub model `model_id`.

    Args:
        model_id: hub identifier. Ids without a "/" use the legacy flat
            ``{model_id}-{filename}`` layout; namespaced ids use
            ``{model_id}/{filename}``.
        filename: name of the file within the repo.
        use_cdn: serve from the CDN (CLOUDFRONT_DISTRIB_PREFIX) instead of
            the S3 bucket (S3_BUCKET_PREFIX).

    Returns:
        The fully-qualified URL string.
    """
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    # Bug fix: the mangled body interpolated the literal "(unknown)" where
    # the filename belongs; restored to `filename`.
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"


# Name used by call sites elsewhere in this module (Config.get_config_dict).
hf_bucket_url = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    """Stream `url` into the open binary file object `temp_file`.

    Sends a ``Range`` header when `resume_size` > 0 so an interrupted
    download can resume, builds a descriptive user-agent, and shows a tqdm
    progress bar while writing 1 KiB chunks.

    Args:
        url: URL to download.
        temp_file: writable binary file object that receives the bytes.
        proxies: optional proxies mapping forwarded to `requests`.
        resume_size: number of bytes already present locally.
        user_agent: extra user-agent info (str or dict of key/value pairs).
    """
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable: nothing left to fetch
        return
    content_length = response.headers.get("Content-Length")
    # Total includes what was already downloaded, so the bar shows progress
    # of the whole file even on resume.
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B", unit_scale=True, total=total, initial=resume_size, desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1_024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()


# Name used by call sites elsewhere in this module (get_from_cache).
http_get = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    """Download `url` into the local cache (if needed) and return its path.

    The cache key combines the URL and the server-provided ETag, so a new
    remote version produces a new cache entry. A file lock serializes
    concurrent downloads of the same URL.

    Args:
        url: remote file to fetch.
        cache_dir: cache directory (defaults to TRANSFORMERS_CACHE).
        force_download: re-download even when a cached copy exists.
        proxies: proxies mapping forwarded to `requests`.
        etag_timeout: timeout (s) for the HEAD request that fetches the ETag.
        resume_download: continue a partial ``.incomplete`` download.
        user_agent: extra user-agent info forwarded to `http_get`.
        local_files_only: never touch the network; serve from cache or fail.

    Returns:
        Path to the cached file, or None when offline and nothing cached.

    Raises:
        ValueError: when `local_files_only` is set and no cached copy exists.
    """
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)
    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    # etag is None = we don't have a connection, or url doesn't exist, or is
    # otherwise inaccessible: try to serve the last downloaded version.
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None
    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                # Append mode so previously fetched bytes are preserved.
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s", url, temp_file.name,
            )
            http_get(url, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent)
        os.replace(temp_file.name, cache_path)
        # Record provenance next to the cached file.
        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)
    return cache_path


# Name used by call sites elsewhere in this module (cached_path).
get_from_cache = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any]=None) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase : Optional[int] = url.encode("utf-8")
__UpperCamelCase : Tuple = shaaaa(SCREAMING_SNAKE_CASE__)
__UpperCamelCase : List[str] = url_hash.hexdigest()
if etag:
__UpperCamelCase : Union[str, Any] = etag.encode("utf-8")
__UpperCamelCase : Optional[Any] = shaaaa(SCREAMING_SNAKE_CASE__)
filename += "." + etag_hash.hexdigest()
if url.endswith(".h5"):
filename += ".h5"
return filename
def _SCREAMING_SNAKE_CASE(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    """Resolve a URL or local path to a concrete local file path.

    Remote URLs are fetched through `get_from_cache`; existing local paths
    are returned as-is. Optionally extracts zip/tar archives next to the
    file into a ``<name>-extracted`` directory.

    Args:
        url_or_filename: remote URL or local path (str or Path).
        cache_dir: cache directory (defaults to TRANSFORMERS_CACHE).
        force_download / proxies / resume_download / user_agent /
        local_files_only: forwarded to `get_from_cache`.
        extract_compressed_file: unpack zip/tar archives after resolving.
        force_extract: re-extract even when the target dir is non-empty.

    Returns:
        Path to the local file, or to the extraction directory when
        `extract_compressed_file` is set and the file is an archive.

    Raises:
        EnvironmentError: missing local file or unidentifiable archive.
        ValueError: argument is neither a URL nor a local path.
    """
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)
        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))
        return output_path_extracted
    return output_path


# Name used by call sites elsewhere in this module (Config.get_config_dict).
cached_path = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : str , _lowerCamelCase : Optional[int]=",") -> List[Any]:
'''simple docstring'''
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
if os.path.isfile(SCREAMING_SNAKE_CASE__):
with open(SCREAMING_SNAKE_CASE__) as f:
__UpperCamelCase : Optional[Any] = eval(f.read())
else:
__UpperCamelCase : Tuple = requests.get(SCREAMING_SNAKE_CASE__)
try:
__UpperCamelCase : Tuple = requests.json()
except Exception:
__UpperCamelCase : str = req.content.decode()
assert data is not None, "could not connect"
try:
__UpperCamelCase : List[Any] = eval(SCREAMING_SNAKE_CASE__)
except Exception:
__UpperCamelCase : Optional[Any] = data.split("\n")
req.close()
return data
def _SCREAMING_SNAKE_CASE(url):
    """Fetch an image over HTTP and decode it via PIL into a numpy array.

    Args:
        url: image URL.

    Returns:
        numpy array of the decoded image (layout as produced by PIL).
    """
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img


# Name used by call sites elsewhere in this module (img_tensorize).
get_image_from_url = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE(url):
    """Download (if needed) and convert a pickled frcnn checkpoint to tensors.

    The file is fetched with wget into the current working directory unless
    it is already present. BatchNorm ``running_var`` entries additionally
    get a zero ``num_batches_tracked`` companion so the dict matches
    torch's BatchNorm state-dict layout.

    Args:
        url: URL of the pickled checkpoint.

    Returns:
        Dict mapping parameter name -> ``torch.Tensor``.
    """
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new
def _SCREAMING_SNAKE_CASE():
    """Print the absolute path of the demo notebook shipped next to this package.

    Bug fix: the mangled body joined an undefined name; the path is built
    from the module-level ``PATH`` constant.
    """
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")
def _SCREAMING_SNAKE_CASE(im, input_format="RGB"):
    """Load an image from a path or URL and return it as a numpy array.

    Local paths are decoded with OpenCV (BGR); everything else is fetched
    via `get_image_from_url`. The array is then converted with
    ``COLOR_BGR2RGB`` and, for ``input_format == "RGB"``, the channel axis
    is reversed (matching the original behavior).

    Args:
        im: local file path or image URL (must be a str).
        input_format: "RGB" to reverse the channel order after conversion.

    Returns:
        numpy array with shape (H, W, 3).
    """
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cva.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cva.cvtColor(img, cva.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] , _lowerCamelCase : Dict=1) -> Optional[int]:
'''simple docstring'''
return (images[i : i + batch] for i in range(0 , len(SCREAMING_SNAKE_CASE__) , SCREAMING_SNAKE_CASE__)) | 557 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger for this configuration module.
lowercase_ = logging.get_logger(__name__)
# Map of canonical XLM-RoBERTa checkpoint names to their hosted config files.
# NOTE(review): this assignment reuses the same mangled name `lowercase_` as
# the logger above, so the logger binding is clobbered here.
lowercase_ = {
    'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
    'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
    'xlm-roberta-large-finetuned-conll02-dutch': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
    ),
    'xlm-roberta-large-finetuned-conll02-spanish': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
    ),
    'xlm-roberta-large-finetuned-conll03-english': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
    ),
    'xlm-roberta-large-finetuned-conll03-german': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
    ),
}
class A_ ( __UpperCamelCase ):
    """Configuration class for an XLM-RoBERTa model.

    Holds the architecture hyper-parameters; the defaults reproduce the
    ``xlm-roberta-base`` checkpoint. Special-token ids are forwarded to the
    base config class.

    Bug fix: the mangled ``__init__`` repeated the parameter name ``a`` for
    all 18 arguments (a SyntaxError) and assigned locals the body never
    read; the standard parameter list is restored.
    """

    __snake_case = """xlm-roberta"""

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        # Forward the special-token ids (and any extras) to the base class.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class A_ ( __UpperCamelCase ):
    """ONNX export configuration: declares the model inputs and their
    dynamic axes."""

    @property
    def _snake_case ( self: Optional[Any] ):
        """Return an ordered mapping of input name -> dynamic-axis labels.

        Multiple-choice tasks carry an extra ``choice`` axis between batch
        and sequence.

        Bug fix: the mangled body bound the axes dict to ``__lowerCamelCase``
        while the return statement read the undefined name ``dynamic_axis``.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
| 669 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import scaffold for the YOLOS model family: the import-structure dict
# maps submodule name -> public symbols, and heavy submodules are imported
# only on first attribute access (or eagerly for static type checkers).
#
# NOTE(review): obfuscation damage — the conditional branches below rebind
# the mangled name `_snake_case` instead of extending the import-structure
# dict, and the final _LazyModule call reads `_import_structure`, which is
# never defined here. Confirm against the upstream __init__.py.
_snake_case : int = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
# Register vision-dependent symbols only when the vision extras are installed.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _snake_case : List[Any] = ['YolosFeatureExtractor']
    _snake_case : Optional[int] = ['YolosImageProcessor']
# Register torch-dependent modeling symbols only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _snake_case : Tuple = [
        'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
        'YolosForObjectDetection',
        'YolosModel',
        'YolosPreTrainedModel',
    ]
# Under static type checking, import everything eagerly so symbols resolve.
if TYPE_CHECKING:
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )
else:
    # At runtime, replace this module with a lazy proxy object.
    import sys
    _snake_case : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 53 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
# Force deterministic torch/cuda kernels so the pixel-slice assertions in the
# tests below are reproducible across runs.
enable_full_determinism()
class A_ ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = ConsistencyModelPipeline
__snake_case = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
__snake_case = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
__snake_case = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""output_type""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
@property
def _snake_case ( self: str ):
__lowerCamelCase : Tuple = UNetaDModel.from_pretrained(
'diffusers/consistency-models-test' , subfolder='test_unet' , )
return unet
@property
def _snake_case ( self: Tuple ):
__lowerCamelCase : List[str] = UNetaDModel.from_pretrained(
'diffusers/consistency-models-test' , subfolder='test_unet_class_cond' , )
return unet
def _snake_case ( self: int , a: str=False ):
if class_cond:
__lowerCamelCase : str = self.dummy_cond_unet
else:
__lowerCamelCase : str = self.dummy_uncond_unet
# Default to CM multistep sampler
__lowerCamelCase : Tuple = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
__lowerCamelCase : Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
}
return components
def _snake_case ( self: int , a: List[str] , a: Any=0 ):
if str(a ).startswith('mps' ):
__lowerCamelCase : List[Any] = torch.manual_seed(a )
else:
__lowerCamelCase : Tuple = torch.Generator(device=a ).manual_seed(a )
__lowerCamelCase : Optional[Any] = {
'batch_size': 1,
'num_inference_steps': None,
'timesteps': [22, 0],
'generator': generator,
'output_type': 'np',
}
return inputs
def _snake_case ( self: Optional[Any] ):
__lowerCamelCase : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : Dict = self.get_dummy_components()
__lowerCamelCase : str = ConsistencyModelPipeline(**a )
__lowerCamelCase : str = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : Any = self.get_dummy_inputs(a )
__lowerCamelCase : Optional[int] = pipe(**a ).images
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1]
__lowerCamelCase : str = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _snake_case ( self: Optional[int] ):
__lowerCamelCase : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : Dict = self.get_dummy_components(class_cond=a )
__lowerCamelCase : Optional[int] = ConsistencyModelPipeline(**a )
__lowerCamelCase : Any = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : Union[str, Any] = self.get_dummy_inputs(a )
__lowerCamelCase : Tuple = 0
__lowerCamelCase : List[str] = pipe(**a ).images
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase : Dict = image[0, -3:, -3:, -1]
__lowerCamelCase : Any = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _snake_case ( self: Optional[int] ):
__lowerCamelCase : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : Optional[int] = self.get_dummy_components()
__lowerCamelCase : Tuple = ConsistencyModelPipeline(**a )
__lowerCamelCase : Union[str, Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : Tuple = self.get_dummy_inputs(a )
__lowerCamelCase : str = 1
__lowerCamelCase : Optional[int] = None
__lowerCamelCase : Any = pipe(**a ).images
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase : int = image[0, -3:, -3:, -1]
__lowerCamelCase : Optional[int] = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def _snake_case ( self: List[str] ):
        """Single-step, class-conditional sampling test.

        NOTE(review): local names were mechanically scrambled — assignments
        target ``__lowerCamelCase`` and the free name ``a`` is undefined,
        leaving ``pipe``/``image``/``image_slice``/``expected_slice``
        unbound at their use sites.
        """
        __lowerCamelCase : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
        __lowerCamelCase : List[Any] = self.get_dummy_components(class_cond=a )
        __lowerCamelCase : Optional[Any] = ConsistencyModelPipeline(**a )
        __lowerCamelCase : List[Any] = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : List[str] = self.get_dummy_inputs(a )
        # presumably num_inference_steps = 1, timesteps = None, class_labels = 0 — TODO confirm
        __lowerCamelCase : List[str] = 1
        __lowerCamelCase : List[str] = None
        __lowerCamelCase : str = 0
        __lowerCamelCase : Tuple = pipe(**a ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCamelCase : int = image[0, -3:, -3:, -1]
        __lowerCamelCase : Any = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
    '''GPU integration tests for a consistency-model pipeline.

    NOTE(review): identifiers in this block were mechanically scrambled —
    the free name ``a`` is undefined, every local assignment targets
    ``__lowerCamelCase`` (leaving ``generator``, ``inputs``, ``latents``,
    ``pipe`` and ``image`` unbound where they are read), two signatures
    repeat the parameter name ``a`` (a SyntaxError), and ``gc``, ``torch``,
    ``torch.floataa``, ``@require_torch_a``, ``UNetaDModel``,
    ``CMStochasticIterativeScheduler``, ``randn_tensor`` and ``sdp_kernel``
    do not resolve in this file. Restore names from the upstream source
    before running.
    '''
    def _snake_case ( self: Any ):
        # Release GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def _snake_case ( self: Optional[int] , a: str=0 , a: Tuple=False , a: Tuple="cpu" , a: List[str]=torch.floataa , a: Optional[Any]=(1, 3, 64, 64) ):
        # Build pipeline-call kwargs; optionally pin fixed latents for determinism.
        __lowerCamelCase : Optional[Any] = torch.manual_seed(a )
        __lowerCamelCase : Optional[int] = {
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'class_labels': 0,
            'generator': generator,
            'output_type': 'np',
        }
        if get_fixed_latents:
            __lowerCamelCase : Dict = self.get_fixed_latents(seed=a , device=a , dtype=a , shape=a )
            __lowerCamelCase : Optional[Any] = latents
        return inputs
    def _snake_case ( self: Any , a: Any=0 , a: List[str]="cpu" , a: Optional[Any]=torch.floataa , a: int=(1, 3, 64, 64) ):
        # Deterministic latents for a given seed/device/dtype/shape.
        if type(a ) == str:
            __lowerCamelCase : Dict = torch.device(a )
        __lowerCamelCase : Union[str, Any] = torch.Generator(device=a ).manual_seed(a )
        __lowerCamelCase : str = randn_tensor(a , generator=a , device=a , dtype=a )
        return latents
    def _snake_case ( self: str ):
        # Multi-step sampling, compared against a reference image slice.
        __lowerCamelCase : Optional[int] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        __lowerCamelCase : Union[str, Any] = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCamelCase : str = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : int = self.get_inputs()
        __lowerCamelCase : Optional[int] = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
        __lowerCamelCase : Any = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    def _snake_case ( self: Optional[int] ):
        # Single-step sampling (num_inference_steps=1, timesteps=None).
        __lowerCamelCase : int = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        __lowerCamelCase : int = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCamelCase : Dict = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : List[str] = self.get_inputs()
        __lowerCamelCase : Optional[Any] = 1
        __lowerCamelCase : Dict = None
        __lowerCamelCase : Union[str, Any] = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
        __lowerCamelCase : Tuple = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    @require_torch_a
    def _snake_case ( self: List[str] ):
        # Half-precision multi-step run under torch 2.0 SDP flash attention.
        __lowerCamelCase : Optional[Any] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        __lowerCamelCase : Any = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCamelCase : List[Any] = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : int = self.get_inputs(get_fixed_latents=a , device=a )
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ):
            __lowerCamelCase : int = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
        __lowerCamelCase : Optional[Any] = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    @require_torch_a
    def _snake_case ( self: Dict ):
        # Half-precision single-step run under torch 2.0 SDP flash attention.
        __lowerCamelCase : Dict = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        __lowerCamelCase : Any = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCamelCase : str = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : str = self.get_inputs(get_fixed_latents=a , device=a )
        __lowerCamelCase : str = 1
        __lowerCamelCase : Union[str, Any] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ):
            __lowerCamelCase : Optional[int] = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCamelCase : str = image[0, -3:, -3:, -1]
        __lowerCamelCase : str = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 669 | 0 |
'''simple docstring'''
import math
from datetime import datetime, timedelta
def UpperCAmelCase_ (__a : int ):
    """Return the date of Easter Sunday for the Gregorian year ``__a``.

    Uses Gauss's Easter algorithm: derive the date of the Paschal Full
    Moon from the 19-year Metonic cycle plus the Gregorian century
    corrections, then advance to the following Sunday.

    The original body referenced the undefined names ``year`` and
    ``SCREAMING_SNAKE_CASE__``; it also divided the century by 4 with
    ``/`` where Gauss's formula requires floor division (the fractional
    remainder produced wrong dates for years whose century is not a
    multiple of 4, e.g. 1994).

    :param __a: Gregorian year (e.g. ``2023``).
    :return: ``datetime`` of Easter Sunday in that year.
    """
    year = __a
    # Position of the year inside the 19-year Metonic (lunar) cycle.
    metonic_cycle = year % 19
    # Julian leap-year / weekday offsets.
    julian_leap_year = year % 4
    non_leap_year = year % 7
    # Gregorian century corrections: dropped leap days and lunar drift.
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits // 4  # floor, per Gauss
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon; days from the PHM to the next Sunday.
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    # Two historical exceptions of Gauss's algorithm.
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )
if __name__ == "__main__":
    # Demo: report Easter for a handful of years, choosing past/future
    # tense relative to the current year. The original referenced the
    # undefined names ``gauss_easter`` and ``tense`` (the function in this
    # file is named ``UpperCAmelCase_``).
    for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
        tense = """will be""" if year > datetime.now().year else """was"""
        print(f'''Easter in {year} {tense} {UpperCAmelCase_(year)}''')
| 229 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger for this configuration file.
# NOTE(review): both of these module-level bindings were renamed to the same
# ``lowercase_`` — the archive map below shadows the logger; presumably the
# originals were ``logger`` and a ``*_PRETRAINED_CONFIG_ARCHIVE_MAP`` — confirm upstream.
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
    'microsoft/trocr-base-handwritten': (
        'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class A_ ( PretrainedConfig ):
    """Configuration class for a TrOCR-style autoregressive text decoder.

    The original block was scrambled: all 20 ``__init__`` parameters shared
    the name ``a`` (a SyntaxError), the body read names the signature never
    bound, the three class attributes collided on ``__snake_case``, and the
    base class ``__UpperCamelCase`` was undefined even though
    ``PretrainedConfig`` is imported at the top of this file. Names and
    defaults are restored here.
    """

    model_type = """trocr"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    # Map generic config attribute names onto the decoder-specific ones.
    attribute_map = {
        """num_attention_heads""": """decoder_attention_heads""",
        """hidden_size""": """d_model""",
        """num_hidden_layers""": """decoder_layers""",
    }

    def __init__(
        self,
        vocab_size=5_0265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.0_2,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        # Decoder size / architecture hyper-parameters.
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        # Regularisation and initialisation.
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        # Generation / embedding behaviour.
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 669 | 0 |
"""simple docstring"""
import math
def lowerCamelCase_ (UpperCamelCase__ : int ):
    """Segmented Sieve of Eratosthenes: return all primes <= ``UpperCamelCase__``.

    First sieves the base range [2, sqrt(n)] classically, then sieves the
    rest of [2, n] in segments of width sqrt(n), marking multiples of each
    base prime inside every segment.

    The original body referenced the undefined name
    ``SCREAMING_SNAKE_CASE__`` and had lost the subscript targets of its
    marking assignments (``temp[i] = False`` etc.); both are restored.

    :param UpperCamelCase__: inclusive upper bound of the search range.
    :return: list of all primes <= the bound, in increasing order.
    """
    n = UpperCamelCase__
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []
    # Classic sieve on the base segment [2, sqrt(n)].
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime
    # Sieve the remaining range in segments of width ``end``.
    low = end + 1
    high = min(2 * end, n)
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            # First multiple of ``each`` at or above ``low``.
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(10**6))
| 506 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A_ ( ProcessorMixin ):
    """Processor pairing a CLIP image processor with an XLM-Roberta tokenizer.

    Offers a single ``__call__`` that tokenizes text and/or preprocesses
    images and merges both into one encoding, plus pass-throughs to the
    tokenizer's decode helpers.

    The original block was scrambled: ``__init__``/``__call__`` repeated the
    parameter name ``a`` (a SyntaxError), local results were assigned to
    ``__lowerCamelCase`` while read as ``encoding``/``image_features``, the
    class attributes collided on ``__snake_case``, and the base class
    ``__UpperCamelCase`` was undefined even though ``ProcessorMixin`` is
    imported at the top of this file. Names are restored here.
    """

    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """CLIPImageProcessor"""
    tokenizer_class = ("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            # Backwards compatibility: accept the deprecated kwarg name.
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize ``text`` and/or preprocess ``images``; at least one is required."""
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            # Merge the pixel values into the text encoding.
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of tokenizer and image-processor input names, order-preserving.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 669 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class SCREAMING_SNAKE_CASE_ :
    """Builds a tiny ``EsmConfig`` plus dummy inputs for the TF ESM tests.

    NOTE(review): identifiers were mechanically scrambled — every local
    assignment targets ``__lowerCamelCase`` (so names read later, e.g.
    ``parent``, ``config``, ``input_ids``, ``model``, ``result``, are
    unbound), and two tuple-unpacking statements were collapsed to the
    single target ``(__lowerCamelCase)``. Restore from the upstream test
    file before running.
    """
    def __init__( self :Any , __lowercase :str , ):
        # Fixed small hyper-parameters for fast tests.
        __lowerCamelCase : List[Any] =parent
        __lowerCamelCase : Optional[Any] =13
        __lowerCamelCase : str =7
        __lowerCamelCase : List[str] =True
        __lowerCamelCase : Optional[int] =True
        __lowerCamelCase : Dict =True
        __lowerCamelCase : Tuple =99
        __lowerCamelCase : List[str] =32
        __lowerCamelCase : List[Any] =2
        __lowerCamelCase : Any =4
        __lowerCamelCase : Dict =37
        __lowerCamelCase : List[Any] ='gelu'
        __lowerCamelCase : List[Any] =0.1
        __lowerCamelCase : Optional[Any] =0.1
        __lowerCamelCase : Dict =512
        __lowerCamelCase : Dict =16
        __lowerCamelCase : Dict =2
        __lowerCamelCase : List[Any] =0.02
        __lowerCamelCase : List[str] =3
        __lowerCamelCase : List[Any] =4
        __lowerCamelCase : int =None
    def __lowercase ( self :Dict ):
        # Random ids/mask/labels plus a small EsmConfig.
        __lowerCamelCase : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __lowerCamelCase : str =None
        if self.use_input_mask:
            __lowerCamelCase : int =random_attention_mask([self.batch_size, self.seq_length] )
        __lowerCamelCase : Dict =None
        __lowerCamelCase : Optional[int] =None
        __lowerCamelCase : Any =None
        if self.use_labels:
            __lowerCamelCase : str =ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __lowerCamelCase : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __lowerCamelCase : Any =ids_tensor([self.batch_size] , self.num_choices )
        __lowerCamelCase : int =EsmConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def __lowercase ( self :Tuple ):
        # Decoder variant: adds encoder hidden states + attention mask.
        (
            __lowerCamelCase
        ) : int =self.prepare_config_and_inputs()
        __lowerCamelCase : Optional[Any] =True
        __lowerCamelCase : Any =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        __lowerCamelCase : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def __lowercase ( self :Any , __lowercase :Tuple , __lowercase :Optional[int] , __lowercase :int , __lowercase :Union[str, Any] , __lowercase :int , __lowercase :List[Any] ):
        # Base-model forward: dict input, list input, and keyword input must agree.
        __lowerCamelCase : Dict =TFEsmModel(config=__lowercase )
        __lowerCamelCase : int ={'input_ids': input_ids, 'attention_mask': input_mask}
        __lowerCamelCase : int =model(__lowercase )
        __lowerCamelCase : int =[input_ids, input_mask]
        __lowerCamelCase : int =model(__lowercase )
        __lowerCamelCase : Optional[Any] =model(__lowercase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def __lowercase ( self :Optional[int] , __lowercase :int , __lowercase :Union[str, Any] , __lowercase :str , __lowercase :Dict , __lowercase :Union[str, Any] , __lowercase :Dict , __lowercase :Optional[int] , __lowercase :Any , ):
        # Decoder forward with cross-attention inputs.
        __lowerCamelCase : Optional[Any] =True
        __lowerCamelCase : int =TFEsmModel(config=__lowercase )
        __lowerCamelCase : Tuple ={
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'encoder_hidden_states': encoder_hidden_states,
            'encoder_attention_mask': encoder_attention_mask,
        }
        __lowerCamelCase : List[str] =model(__lowercase )
        __lowerCamelCase : Optional[int] =[input_ids, input_mask]
        __lowerCamelCase : int =model(__lowercase , encoder_hidden_states=__lowercase )
        # Also check the case where encoder outputs are not passed
        __lowerCamelCase : Any =model(__lowercase , attention_mask=__lowercase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def __lowercase ( self :Union[str, Any] , __lowercase :Optional[int] , __lowercase :Optional[int] , __lowercase :Dict , __lowercase :Tuple , __lowercase :str , __lowercase :str ):
        # Masked-LM head: logits cover the full vocabulary.
        __lowerCamelCase : List[str] =TFEsmForMaskedLM(config=__lowercase )
        __lowerCamelCase : List[str] =model([input_ids, input_mask] )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def __lowercase ( self :List[str] , __lowercase :str , __lowercase :List[str] , __lowercase :Any , __lowercase :Optional[Any] , __lowercase :Union[str, Any] , __lowercase :Optional[Any] ):
        # Token-classification head: logits cover ``num_labels``.
        __lowerCamelCase : int =self.num_labels
        __lowerCamelCase : Optional[int] =TFEsmForTokenClassification(config=__lowercase )
        __lowerCamelCase : Dict ={'input_ids': input_ids, 'attention_mask': input_mask}
        __lowerCamelCase : Any =model(__lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def __lowercase ( self :Optional[Any] ):
        # Common config/inputs pair used by the shared test mixin.
        __lowerCamelCase : Union[str, Any] =self.prepare_config_and_inputs()
        (
            __lowerCamelCase
        ) : Any =config_and_inputs
        __lowerCamelCase : int ={'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE_ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
    """Common model-tester suite for the TF ESM family.

    NOTE(review): the two ``__UpperCamelCase`` bases are unresolved here —
    presumably ``TFModelTesterMixin`` and ``PipelineTesterMixin`` (both
    imported at the top of this file); the class attributes below were also
    renamed to the colliding ``__snake_case`` (only the last assignment
    survives), locals were scrambled to ``__lowerCamelCase``, and
    ``TFEsmModelTester`` does not resolve (the tester class in this file is
    itself renamed). Confirm against the upstream test file.
    """
    __snake_case : Any = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    __snake_case : int = (
        {
            """feature-extraction""": TFEsmModel,
            """fill-mask""": TFEsmForMaskedLM,
            """text-classification""": TFEsmForSequenceClassification,
            """token-classification""": TFEsmForTokenClassification,
            """zero-shot""": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    __snake_case : Tuple = False
    __snake_case : List[Any] = False
    def __lowercase ( self :Union[str, Any] ):
        # Wire up the model tester and a config tester.
        __lowerCamelCase : List[str] =TFEsmModelTester(self )
        __lowerCamelCase : Optional[Any] =ConfigTester(self , config_class=__lowercase , hidden_size=37 )
    def __lowercase ( self :Union[str, Any] ):
        self.config_tester.run_common_tests()
    def __lowercase ( self :List[Any] ):
        __lowerCamelCase : Any =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowercase )
    def __lowercase ( self :Union[str, Any] ):
        __lowerCamelCase : int =self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*__lowercase )
    def __lowercase ( self :List[str] ):
        __lowerCamelCase : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*__lowercase )
    def __lowercase ( self :List[Any] ):
        __lowerCamelCase : List[Any] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*__lowercase )
    @slow
    def __lowercase ( self :List[str] ):
        # Loading one pretrained checkpoint should succeed.
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowerCamelCase : Dict =TFEsmModel.from_pretrained(__lowercase )
            self.assertIsNotNone(__lowercase )
    @unittest.skip('''Protein models do not support embedding resizing.''' )
    def __lowercase ( self :int ):
        pass
    @unittest.skip('''Protein models do not support embedding resizing.''' )
    def __lowercase ( self :Optional[Any] ):
        pass
    def __lowercase ( self :Optional[int] ):
        # Input embeddings must be Keras layers; bias handling differs for MLM.
        __lowerCamelCase : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowerCamelCase : Dict =model_class(__lowercase )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                __lowerCamelCase : Optional[Any] =model.get_bias()
                assert isinstance(__lowercase , __lowercase )
                for k, v in name.items():
                    assert isinstance(__lowercase , tf.Variable )
            else:
                __lowerCamelCase : List[str] =model.get_output_embeddings()
                assert x is None
                __lowerCamelCase : Union[str, Any] =model.get_bias()
                assert name is None
@require_tf
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """Slow integration tests comparing TF ESM outputs to reference slices.

    NOTE(review): local assignments were scrambled to ``__lowerCamelCase``,
    leaving ``model``, ``output`` and ``expected_slice`` unbound where they
    are read; restore from the upstream test file before running.
    """
    @slow
    def __lowercase ( self :Tuple ):
        # Masked-LM logits of a small ESM-2 checkpoint on a toy sequence.
        __lowerCamelCase : Optional[Any] =TFEsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
        __lowerCamelCase : Optional[int] =tf.constant([[0, 1, 2, 3, 4, 5]] )
        __lowerCamelCase : Tuple =model(__lowercase )[0]
        __lowerCamelCase : Optional[Any] =[1, 6, 33]
        self.assertEqual(list(output.numpy().shape ) , __lowercase )
        # compare the actual values for a slice.
        __lowerCamelCase : Optional[int] =tf.constant(
            [
                [
                    [8.921518, -10.58_9814, -6.4671307],
                    [-6.3967156, -13.91_1377, -1.1211915],
                    [-7.781247, -13.95_1557, -3.740592],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
    @slow
    def __lowercase ( self :Dict ):
        # Base-model hidden states on a short protein sequence.
        __lowerCamelCase : Optional[int] =TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
        __lowerCamelCase : Dict =tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        __lowerCamelCase : Any =model(__lowercase )[0]
        # compare the actual values for a slice.
        __lowerCamelCase : List[Any] =tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 179 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
    '''Fast CPU tests for the unconditional latent-diffusion (LDM) pipeline.

    NOTE(review): identifiers were mechanically scrambled — the three
    fixture properties and the test method all share the name
    ``_snake_case`` (later defs shadow earlier ones), locals assign to
    ``__lowerCamelCase`` leaving ``model``/``ldm``/``image`` etc. unbound,
    and the free name ``a`` is undefined. Restore from the upstream test
    file before running.
    '''
    @property
    def _snake_case ( self: int ):
        # Tiny UNet fixture with a fixed seed.
        torch.manual_seed(0 )
        __lowerCamelCase : int = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
        return model
    @property
    def _snake_case ( self: str ):
        # Tiny VQ autoencoder fixture.
        torch.manual_seed(0 )
        __lowerCamelCase : Any = VQModel(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
        return model
    @property
    def _snake_case ( self: Dict ):
        # Tiny CLIP text-encoder fixture.
        torch.manual_seed(0 )
        __lowerCamelCase : Tuple = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModel(a )
    def _snake_case ( self: List[str] ):
        # Two-step sampling: dict- and tuple-return paths must agree and match
        # a reference slice.
        __lowerCamelCase : Union[str, Any] = self.dummy_uncond_unet
        __lowerCamelCase : List[str] = DDIMScheduler()
        __lowerCamelCase : str = self.dummy_vq_model
        __lowerCamelCase : Optional[int] = LDMPipeline(unet=a , vqvae=a , scheduler=a )
        ldm.to(a )
        ldm.set_progress_bar_config(disable=a )
        __lowerCamelCase : Optional[int] = torch.manual_seed(0 )
        __lowerCamelCase : Any = ldm(generator=a , num_inference_steps=2 , output_type='numpy' ).images
        __lowerCamelCase : Tuple = torch.manual_seed(0 )
        __lowerCamelCase : Dict = ldm(generator=a , num_inference_steps=2 , output_type='numpy' , return_dict=a )[0]
        __lowerCamelCase : Union[str, Any] = image[0, -3:, -3:, -1]
        __lowerCamelCase : int = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        __lowerCamelCase : Optional[int] = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] )
        # MPS needs a looser tolerance.
        __lowerCamelCase : str = 1e-2 if torch_device != 'mps' else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class A_ ( unittest.TestCase ):
    '''Slow test: pretrained CelebA-HQ LDM checkpoint, 5 inference steps.

    NOTE(review): locals were scrambled to ``__lowerCamelCase`` and the free
    name ``a`` is undefined — ``ldm`` and ``image`` are unbound where they
    are read. Restore from the upstream test file before running.
    '''
    def _snake_case ( self: Optional[int] ):
        __lowerCamelCase : int = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
        ldm.to(a )
        ldm.set_progress_bar_config(disable=a )
        __lowerCamelCase : Dict = torch.manual_seed(0 )
        __lowerCamelCase : int = ldm(generator=a , num_inference_steps=5 , output_type='numpy' ).images
        __lowerCamelCase : List[str] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        __lowerCamelCase : List[Any] = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] )
        # MPS needs a looser tolerance.
        __lowerCamelCase : Union[str, Any] = 1e-2 if torch_device != 'mps' else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 669 | 0 |
'''simple docstring'''
from __future__ import annotations
from random import random
class __UpperCAmelCase :
    """A treap node: BST order on ``value``, max-heap order on random ``prior``.

    The original ``__init__`` read the undefined name ``value`` (its
    parameter was renamed) and assigned every field to a throwaway local,
    so ``self.value``/``self.prior``/``self.left``/``self.right`` — which
    ``__repr__``, ``__str__`` and the tree operations below all read — were
    never set. Restored here.
    """

    def __init__(self, value=None):
        self.value = value
        # A random heap priority keeps the treap balanced in expectation.
        self.prior = random()
        self.left = None
        self.right = None

    def __repr__(self):
        from pprint import pformat

        if self.left is None and self.right is None:
            return F'''\'{self.value}: {self.prior:.5}\''''
        else:
            return pformat(
                {F'''{self.value}: {self.prior:.5}''': (self.left, self.right)}, indent=1
            )

    def __str__(self):
        # Pre-order rendering: value then left subtree then right subtree.
        value = str(self.value) + ' '
        left = str(self.left or '''''')
        right = str(self.right or '''''')
        return value + left + right
def split(root, value):
    """Split the treap ``root`` into (keys <= value excluded left part, rest).

    Returns a ``(left, right)`` pair where ``left`` holds all nodes with
    ``node.value < value`` and ``right`` the remainder. The original def was
    renamed to ``snake_case_`` (every call site in this file uses ``split``),
    referenced the undefined ``SCREAMING_SNAKE_CASE__``, and had lost its
    tuple-unpack targets; all restored here.
    """
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            # Root (and its right subtree) stays on the right side.
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right
def merge(left, right):
    """Merge two treaps where all keys in ``left`` precede those in ``right``.

    The node with the smaller ``prior`` becomes the root, preserving the
    heap property. The original def was renamed to ``snake_case_`` (call
    sites use ``merge``), referenced ``SCREAMING_SNAKE_CASE__``, and had
    lost its child-assignment targets; all restored here.
    """
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right
def insert(root, value):
    """Insert ``value`` into the treap rooted at ``root``; return the new root.

    Splits around ``value`` and merges the new node in between. The node
    class in this file is named ``__UpperCAmelCase`` (the original body
    referenced the undefined ``Node``); the def itself was renamed to
    ``snake_case_`` while call sites use ``insert`` — both restored here.
    """
    node = __UpperCAmelCase(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)
def erase(root, value):
    """Remove every node holding ``value`` from the treap; return the new root.

    Splits out the band of nodes equal to ``value`` and merges the two
    remaining parts. The original def was renamed to ``snake_case_`` (call
    sites use ``erase``) and had lost its tuple-unpack targets.
    """
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)
def inorder(root):
    """Print the treap's values in sorted (in-order) order, comma-separated.

    The original def was renamed to ``snake_case_`` while both its own
    recursive calls and external callers use ``inorder``; restored here.
    """
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=''',''')
        inorder(root.right)
def interact_treap(root, args):
    """Apply a whitespace-separated command string to the treap.

    ``+N`` inserts N, ``-N`` erases all N; anything else prints an error.
    Returns the (possibly new) root. The original def was renamed to
    ``snake_case_`` (the driver calls ``interact_treap``) and dropped the
    re-binding of ``root``; both restored here.
    """
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print('''Unknown command''')
    return root
def main():
    """Interactive treap shell: read command lines until the user enters 'q'.

    The original def was renamed to ``snake_case_`` while the ``__main__``
    guard calls ``main()``; locals were also scrambled. Restored here.
    """
    root = None
    print(
        '''enter numbers to create a tree, + value to add value into treap, '''
        '''- value to erase all nodes with value. \'q\' to quit. ''')
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print('''good by!''')
if __name__ == "__main__":
    # Run the module doctests, then start the interactive treap shell
    # (``main`` must be defined above under that name).
    import doctest
    doctest.testmod()
    main()
| 274 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
# Usage banner raised when the script is invoked with the wrong argument count
# (the __main__ block below reads it as ``usage_doc``; it was bound to the
# throwaway name ``lowercase_``, as was ``choice`` before being shuffled).
usage_doc = 'Usage of script: script_name <size_of_canvas:int>'
# ~9% live seed density: 100 dead (0) + 10 alive (1) markers, shuffled.
choice = [0] * 1_0_0 + [1] * 1_0
random.shuffle(choice)
def create_canvas(size):
    """Return a ``size`` x ``size`` grid of dead (``False``) cells.

    The original def was renamed to ``UpperCamelCase__`` while the script
    body calls ``create_canvas``, and its comprehension result was assigned
    to a throwaway local while ``return canvas`` read an unbound name.
    """
    canvas = [[False for _ in range(size)] for _ in range(size)]
    return canvas
def seed(canvas):
    """Randomize every cell of ``canvas`` in place to a random bool.

    The original def was renamed to ``UpperCamelCase__`` while the script
    calls ``seed``, and the cell assignment target ``canvas[i][j]`` had
    been lost; both restored here.
    """
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))
def run(canvas):
    """Advance the Game-of-Life board one generation; return list-of-lists.

    Each cell's next state is judged from its 3x3 neighbourhood (numpy
    slicing clips at the borders). The original def was renamed to
    ``UpperCamelCase__`` while the script calls ``run``, and all local
    bindings (``current_canvas``/``next_gen_canvas``) had been scrambled.
    """
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )
    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas
def __judge_point(pt, neighbours):
    """Apply Conway's rules to one cell; return its next state.

    ``pt`` is the cell's current state, ``neighbours`` its (clipped) 3x3
    neighbourhood including the cell itself. The original def repeated the
    parameter name ``SCREAMING_SNAKE_CASE__`` (a SyntaxError), was renamed
    away from ``__judge_point`` (which ``run`` calls), and had lost the
    ``dead``/``alive``/``state`` bindings; all restored here.
    """
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1
    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1
    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False  # under-population
        elif alive == 2 or alive == 3:
            state = True  # survival
        elif alive > 3:
            state = False  # over-population
    else:
        if alive == 3:
            state = True  # reproduction
    return state
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)
    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    # (The original bound every result to ``lowercase_`` while later lines
    # read ``c``/``fig``/``ax``/``cmap``; real names restored here.)
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(['w', 'k'])
    try:
        # Animate generations until the user interrupts.
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
| 669 | 0 |
'''simple docstring'''
def UpperCAmelCase ( UpperCAmelCase__ : str) -> bool:
    """Return True if the string is an isogram (no repeated letters).

    Comparison is case-insensitive. Raises ``ValueError`` for any
    non-alphabetic character. The original body read the undefined names
    ``string`` and ``SCREAMING_SNAKE_CASE__`` instead of its parameter.

    :param UpperCAmelCase__: the word to check (letters only).
    :raises ValueError: if the input contains non-alphabetic characters.
    """
    string = UpperCAmelCase__
    if not all(x.isalpha() for x in string):
        raise ValueError('String must only contain alphabetic characters.')
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
if __name__ == "__main__":
    # The checker above is defined as ``UpperCAmelCase``; the original
    # referenced the undefined names ``is_isogram`` and ``input_str``.
    input_str = input('Enter a string ').strip()
    isogram = UpperCAmelCase(input_str)
    print(f"""{input_str} is {"an" if isogram else "not an"} isogram.""")
| 320 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class A_ ( __UpperCamelCase ):
    '''Decoding strategies for the MGP-STR processor: char, BPE, WordPiece.

    NOTE(review): the base ``__UpperCamelCase`` is unresolved (presumably
    ``ExplicitEnum``, imported at the top of this file), and the three
    member names below all collide on ``__snake_case`` so only the last
    assignment ("wp") survives — presumably they were distinct members
    (CHARACTER / BPE / WORDPIECE); confirm against the upstream source.
    '''
    __snake_case = """char"""
    __snake_case = """bpe"""
    __snake_case = """wp"""
lowercase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class A_ ( __UpperCamelCase ):
    '''Processor for MGP-STR scene-text recognition: pairs a ViT image
    processor with an MGP-STR character tokenizer.

    NOTE(review): the base ``__UpperCamelCase`` is unresolved (presumably
    ``ProcessorMixin``, imported at the top of this file), and the three
    class attributes below collide on ``__snake_case`` — presumably they
    were ``attributes``, ``image_processor_class`` and ``tokenizer_class``;
    confirm against the upstream source.
    '''
    __snake_case = ["""image_processor""", """char_tokenizer"""]
    __snake_case = """ViTImageProcessor"""
    __snake_case = """MgpstrTokenizer"""
def __init__( self: int , a: Dict=None , a: Optional[int]=None , **a: List[str] ):
__lowerCamelCase : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , a , )
__lowerCamelCase : Optional[Any] = kwargs.pop('feature_extractor' )
__lowerCamelCase : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
__lowerCamelCase : Any = tokenizer
__lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained('gpt2' )
__lowerCamelCase : int = AutoTokenizer.from_pretrained('bert-base-uncased' )
super().__init__(a , a )
def __call__( self: Optional[int] , a: Optional[int]=None , a: List[Any]=None , a: int=None , **a: str ):
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
__lowerCamelCase : Dict = self.image_processor(a , return_tensors=a , **a )
if text is not None:
__lowerCamelCase : Dict = self.char_tokenizer(a , return_tensors=a , **a )
if text is None:
return inputs
elif images is None:
return encodings
else:
__lowerCamelCase : List[str] = encodings['input_ids']
return inputs
def _snake_case ( self: List[str] , a: List[Any] ):
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = sequences
__lowerCamelCase : List[str] = char_preds.size(0 )
__lowerCamelCase , __lowerCamelCase : str = self._decode_helper(a , 'char' )
__lowerCamelCase , __lowerCamelCase : Optional[int] = self._decode_helper(a , 'bpe' )
__lowerCamelCase , __lowerCamelCase : Optional[Any] = self._decode_helper(a , 'wp' )
__lowerCamelCase : Tuple = []
__lowerCamelCase : List[Any] = []
for i in range(a ):
__lowerCamelCase : List[Any] = [char_scores[i], bpe_scores[i], wp_scores[i]]
__lowerCamelCase : Optional[int] = [char_strs[i], bpe_strs[i], wp_strs[i]]
__lowerCamelCase : Any = scores.index(max(a ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
__lowerCamelCase : List[str] = {}
__lowerCamelCase : Optional[int] = final_strs
__lowerCamelCase : Dict = final_scores
__lowerCamelCase : Dict = char_strs
__lowerCamelCase : List[Any] = bpe_strs
__lowerCamelCase : Tuple = wp_strs
return out
def _snake_case ( self: int , a: Optional[int] , a: Optional[Any] ):
if format == DecodeType.CHARACTER:
__lowerCamelCase : Optional[Any] = self.char_decode
__lowerCamelCase : Union[str, Any] = 1
__lowerCamelCase : List[str] = '[s]'
elif format == DecodeType.BPE:
__lowerCamelCase : Dict = self.bpe_decode
__lowerCamelCase : List[str] = 2
__lowerCamelCase : Any = '#'
elif format == DecodeType.WORDPIECE:
__lowerCamelCase : List[str] = self.wp_decode
__lowerCamelCase : int = 102
__lowerCamelCase : Dict = '[SEP]'
else:
raise ValueError(F'Format {format} is not supported.' )
__lowerCamelCase , __lowerCamelCase : int = [], []
__lowerCamelCase : Tuple = pred_logits.size(0 )
__lowerCamelCase : List[Any] = pred_logits.size(1 )
__lowerCamelCase , __lowerCamelCase : Dict = pred_logits.topk(1 , dim=-1 , largest=a , sorted=a )
__lowerCamelCase : List[str] = preds_index.view(-1 , a )[:, 1:]
__lowerCamelCase : Dict = decoder(a )
__lowerCamelCase , __lowerCamelCase : Optional[Any] = torch.nn.functional.softmax(a , dim=2 ).max(dim=2 )
__lowerCamelCase : List[str] = preds_max_prob[:, 1:]
for index in range(a ):
__lowerCamelCase : str = preds_str[index].find(a )
__lowerCamelCase : Tuple = preds_str[index][:pred_eos]
__lowerCamelCase : Any = preds_index[index].cpu().tolist()
__lowerCamelCase : Any = pred_index.index(a ) if eos_token in pred_index else -1
__lowerCamelCase : str = preds_max_prob[index][: pred_eos_index + 1]
__lowerCamelCase : Union[str, Any] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(a )
conf_scores.append(a )
return dec_strs, conf_scores
def _snake_case ( self: Tuple , a: Optional[int] ):
__lowerCamelCase : Dict = [seq.replace(' ' , '' ) for seq in self.char_tokenizer.batch_decode(a )]
return decode_strs
def _snake_case ( self: Optional[int] , a: Tuple ):
return self.bpe_tokenizer.batch_decode(a )
def _snake_case ( self: Optional[int] , a: List[Any] ):
__lowerCamelCase : int = [seq.replace(' ' , '' ) for seq in self.wp_tokenizer.batch_decode(a )]
return decode_strs
| 669 | 0 |
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
a__ = re.compile(R'''\s+''')
def A__(snake_case):
    """Return {"hash": md5-of-content} for an example dict.

    Whitespace is stripped first so two files differing only in formatting
    hash identically.  Fixes the obfuscated original, which called the
    nonexistent ``hashlib.mda`` and referenced undefined names.
    """
    stripped = re.sub(r"\s+", "", snake_case["content"])
    return {"hash": hashlib.md5(stripped.encode("utf-8")).hexdigest()}
def A__(snake_case):
    """Return per-line length statistics for an example's content.

    Fixes the obfuscated original, which took the length of an undefined
    name instead of each line.
    """
    line_lengths = [len(line) for line in snake_case["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}
def A__(snake_case):
    """Return the fraction of alphanumeric characters in the content.

    Fixes the obfuscated original, whose body read `example` while the
    parameter was named `snake_case`.
    """
    alpha_frac = np.mean([c.isalnum() for c in snake_case["content"]])
    return {"alpha_frac": alpha_frac}
def A__(example, uniques):
    """Return True (and consume the hash from `uniques`) the first time an
    example's hash is seen; False for subsequent duplicates.

    Fixes the obfuscated original, which declared the same parameter name
    twice (a SyntaxError) while the body read `example` and `uniques`.
    """
    if example["hash"] in uniques:
        # consume the hash so later duplicates are rejected
        uniques.remove(example["hash"])
        return True
    else:
        return False
def A__(example, scan_width=5):
    """Flag files whose first `scan_width` lines contain an auto-generation marker.

    Fixes the obfuscated original: duplicate parameter names (SyntaxError) and
    undefined references in the zip() call.
    """
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    # only inspect the header region of the file
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    return {"autogenerated": False}
def A__(example, scan_width=5, coeff=0.05):
    """Flag config/test files via two heuristics.

    1. A keyword ("unit tests", "test file", "configuration file") in the
       first `scan_width` lines.
    2. More occurrences of "config"/"test" than `coeff * number_of_lines`.

    Fixes the obfuscated original: three parameters shared one name
    (SyntaxError) and the loop referenced undefined names.
    """
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test: explicit keyword near the top of the file
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test: word-frequency threshold proportional to file length
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
    if count_config > threshold or count_test > threshold:
        return {"config_or_test": True}
    return {"config_or_test": False}
def A__(example):
    """Flag files that contain none of the basic Python structure keywords.

    Fixes the obfuscated original, whose body read `example` while the
    parameter was named `snake_case`.
    """
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def A__(example, minimum=4):
    """Flag files with at most `minimum` '=' characters (few assignments).

    Fixes the obfuscated original: duplicate parameter names (SyntaxError)
    and undefined `example`/`minimum` references.
    """
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def A__(example):
    """Return the character-per-token ratio of an example's content.

    Uses the module-level `tokenizer` loaded in the script preamble.
    NOTE(review): the obfuscated source passed an undefined name as the
    `truncation` argument; upstream codeparrot uses truncation=False — confirm.
    """
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}
def A__(example):
    """Aggregate all per-example statistics and heuristic flags into one dict.

    NOTE(review): the helper names below are the upstream codeparrot names;
    in this mangled file every helper was renamed to `A__`, so these lookups
    must be re-pointed at the restored definitions.
    """
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results
def A__(example, uniques, args):
    """Composite filter: keep an example only if it is unique and passes all
    heuristic thresholds carried by `args`.

    Fixes the obfuscated original, whose three parameters shared one name
    (SyntaxError).  NOTE(review): `check_uniques` is the upstream name of the
    uniqueness helper (renamed `A__` in this file) — re-point once restored.
    """
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        # probabilistically keep some config/test files
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True
def A__(file_path):
    """Gzip-compress `file_path` to `<file_path>.gz` and delete the original.

    Fixes the obfuscated original, which passed undefined names to open(),
    gzip.open() and os.unlink().
    """
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
a__ = HfArgumentParser(PreprocessingArguments)
a__ = parser.parse_args()
if args.num_workers is None:
a__ = multiprocessing.cpu_count()
a__ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
a__ = time.time()
a__ = load_dataset(args.dataset_name, split='''train''')
print(f"Time to load dataset: {time.time()-t_start:.2f}")
# Run preprocessing
a__ = time.time()
a__ = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")
# Deduplicate hashes
a__ = set(ds.unique('''hash'''))
a__ = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")
# Deduplicate data and apply heuristics
a__ = time.time()
a__ = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
a__ = time.time()
a__ , a__ = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
print(f"Size of deduplicate dataset: {len(ds_filter)}")
# Save data in batches of samples_per_file
a__ = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
a__ = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
a__ = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
a__ = str(data_dir / f"file-{file_number+1:012}.json")
a__ = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
| 279 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
# Module-level logger for progress/diagnostic output.
lowercase_ = logging.get_logger(__name__)
# Map each slow-tokenizer class name to its fast counterpart class.
# NOTE(review): downstream code reads this map as `TOKENIZER_CLASSES`, but the
# obfuscation renamed both module constants to `lowercase_` — restore the names.
lowercase_ = {name: getattr(transformers, name + 'Fast') for name in SLOW_TO_FAST_CONVERTERS}
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
    # Converts slow tokenizer checkpoints to fast (tokenizer.json) format and
    # saves them under dump_path, organized by org prefix.
    # NOTE(review): this block is machine-mangled — all four parameters share
    # one name (a SyntaxError; presumably tokenizer_name, checkpoint_name,
    # dump_path, force_download) and every local assignment was collapsed to
    # `__lowerCamelCase` while later lines read the original names
    # (tokenizer_names, tokenizer_class, checkpoint_names, dump_path_full, ...).
    # Kept byte-identical pending reconstruction from the upstream script.
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.' )
    if tokenizer_name is None:
        __lowerCamelCase : Optional[int] = TOKENIZER_CLASSES
    else:
        __lowerCamelCase : Union[str, Any] = {tokenizer_name: getattr(SCREAMING_SNAKE_CASE__ , tokenizer_name + 'Fast' )}
    logger.info(f'Loading tokenizer classes: {tokenizer_names}' )
    for tokenizer_name in tokenizer_names:
        __lowerCamelCase : int = TOKENIZER_CLASSES[tokenizer_name]
        __lowerCamelCase : Optional[int] = True
        if checkpoint_name is None:
            # no explicit checkpoint: convert every canonical checkpoint
            __lowerCamelCase : List[Any] = list(tokenizer_class.max_model_input_sizes.keys() )
        else:
            __lowerCamelCase : Optional[Any] = [checkpoint_name]
        logger.info(f'For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}' )
        for checkpoint in checkpoint_names:
            logger.info(f'Loading {tokenizer_class.__class__.__name__} {checkpoint}' )
            # Load tokenizer
            __lowerCamelCase : Tuple = tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , force_download=SCREAMING_SNAKE_CASE__ )
            # Save fast tokenizer
            logger.info(f'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}' )
            # For organization names we create sub-directories
            if "/" in checkpoint:
                __lowerCamelCase , __lowerCamelCase : Tuple = checkpoint.split('/' )
                __lowerCamelCase : Tuple = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
            elif add_prefix:
                __lowerCamelCase : Any = checkpoint
                __lowerCamelCase : Dict = dump_path
            else:
                __lowerCamelCase : List[str] = None
                __lowerCamelCase : Optional[int] = dump_path
            logger.info(f'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
                __lowerCamelCase : List[Any] = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
                __lowerCamelCase : int = file_path.split(SCREAMING_SNAKE_CASE__ )[-1][0]
                if next_char == "/":
                    __lowerCamelCase : List[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
                    __lowerCamelCase : int = None
                logger.info(f'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
            __lowerCamelCase : Dict = tokenizer.save_pretrained(
                SCREAMING_SNAKE_CASE__ , legacy_format=SCREAMING_SNAKE_CASE__ , filename_prefix=SCREAMING_SNAKE_CASE__ )
            logger.info(f'=> File names {file_names}' )
            for file_name in file_names:
                # keep only the fast tokenizer.json output
                if not file_name.endswith('tokenizer.json' ):
                    os.remove(SCREAMING_SNAKE_CASE__ )
                    logger.info(f'=> removing {file_name}' )
if __name__ == "__main__":
    # CLI entry point for the slow->fast tokenizer conversion.
    # NOTE(review): `parser`/`args` are read below but both assignments target
    # `lowercase_`, and `convert_slow_checkpoint_to_fast` is not defined under
    # that name in this file (the converter above is `UpperCamelCase__`) —
    # restore the names before running.
    lowercase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--dump_path', default=None, type=str, required=True, help='Path to output generated fast tokenizer files.'
    )
    parser.add_argument(
        '--tokenizer_name',
        default=None,
        type=str,
        help=(
            F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
            'download and convert all the checkpoints from AWS.'
        ),
    )
    parser.add_argument(
        '--checkpoint_name',
        default=None,
        type=str,
        help='Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.',
    )
    parser.add_argument(
        '--force_download',
        action='store_true',
        help='Re-download checkpoints.',
    )
    lowercase_ = parser.parse_args()
    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 669 | 0 |
import random
def A__(vertices_number, probability, directed=False):
    """Generate a random graph as an adjacency dict {vertex: [neighbours]}.

    Each possible edge is included independently with the given probability.
    probability >= 1 yields the complete graph; probability <= 0 yields an
    edgeless graph.  Undirected graphs (the default) store each edge in both
    endpoints' lists.

    Fixes the obfuscated original: three parameters shared one name
    (SyntaxError) and the body referenced undefined identifiers.
    """
    graph = {i: [] for i in range(vertices_number)}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return {
            i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
        }
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge with the requested probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, mirror the edge from j to i
                    graph[j].append(i)
    return graph
def A__(vertices_number):
    """Return the complete graph on `vertices_number` vertices as an adjacency
    dict {vertex: [all other vertices]}.

    Fixes the obfuscated original, which iterated over an undefined name.
    """
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    # Fix: the original line ended with stray "| 32 |" residue, a SyntaxError.
    import doctest

    doctest.testmod()
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowercase_ = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class A_ ( __UpperCamelCase , unittest.TestCase ):
    '''simple docstring'''
    # Tokenizer test suite for google/pegasus-large.
    # NOTE(review): machine-mangled — the four class attributes below all
    # target `__snake_case` (presumably tokenizer_class, rust_tokenizer_class,
    # test_rust_tokenizer, test_sentencepiece), locals were collapsed to
    # `__lowerCamelCase`, and several statements pass the undefined name `a`.
    # Kept byte-identical pending reconstruction from the upstream test file.
    __snake_case = PegasusTokenizer
    __snake_case = PegasusTokenizerFast
    __snake_case = True
    __snake_case = True
    def _snake_case ( self: List[str] ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        __lowerCamelCase : List[str] = PegasusTokenizer(a )
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def _snake_case ( self: List[Any] ):
        # the full pretrained tokenizer (network download; cached)
        return PegasusTokenizer.from_pretrained('google/pegasus-large' )
    def _snake_case ( self: Tuple , **a: List[Any] ):
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **a )
    def _snake_case ( self: List[Any] , a: int ):
        return ("This is a test", "This is a test")
    def _snake_case ( self: Any ):
        # token <-> id round trip for the EOS token
        __lowerCamelCase : Dict = '</s>'
        __lowerCamelCase : List[str] = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a )
    def _snake_case ( self: Optional[Any] ):
        __lowerCamelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<pad>' )
        self.assertEqual(vocab_keys[1] , '</s>' )
        self.assertEqual(vocab_keys[-1] , 'v' )
        self.assertEqual(len(a ) , 1103 )
    def _snake_case ( self: Tuple ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
    def _snake_case ( self: Dict ):
        # python and rust tokenizers must agree on special-token handling
        __lowerCamelCase : Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        __lowerCamelCase : List[str] = self.tokenizer_class.from_pretrained(self.tmpdirname )
        __lowerCamelCase : Tuple = (
            'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
            ' </s> <pad> <pad> <pad>'
        )
        __lowerCamelCase : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=a , add_special_tokens=a ).input_ids[0]
        __lowerCamelCase : str = py_tokenizer([raw_input_str] , return_tensors=a , add_special_tokens=a ).input_ids[0]
        self.assertListEqual(a , a )
    def _snake_case ( self: int ):
        __lowerCamelCase : Union[str, Any] = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        __lowerCamelCase : Tuple = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
        __lowerCamelCase : Optional[Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
        __lowerCamelCase : Optional[Any] = tokenizer([raw_input_str] , return_tensors=a ).input_ids[0]
        self.assertListEqual(a , a )
    def _snake_case ( self: Dict ):
        __lowerCamelCase : Any = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 9_6103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        __lowerCamelCase : int = 'To ensure a smooth flow of bank resolutions.'
        __lowerCamelCase : Union[str, Any] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
        __lowerCamelCase : List[str] = tokenizer([raw_input_str] , return_tensors=a ).input_ids[0]
        self.assertListEqual(a , a )
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
    @require_torch
    def _snake_case ( self: str ):
        # padding/truncation shapes for source and target batches
        __lowerCamelCase : List[str] = ['This is going to be way too long.' * 150, 'short example']
        __lowerCamelCase : Tuple = ['not super long but more than 5 tokens', 'tiny']
        __lowerCamelCase : Union[str, Any] = self._large_tokenizer(a , padding=a , truncation=a , return_tensors='pt' )
        __lowerCamelCase : List[str] = self._large_tokenizer(
            text_target=a , max_length=5 , padding=a , truncation=a , return_tensors='pt' )
        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(a ) == 2 # input_ids, attention_mask.
    @slow
    def _snake_case ( self: List[str] ):
        # fmt: off
        __lowerCamelCase : Tuple = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=a , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class A_ ( __UpperCamelCase , unittest.TestCase ):
    '''simple docstring'''
    # Tokenizer test suite for google/bigbird-pegasus-large-arxiv (offset=0,
    # single [MASK] token variant).
    # NOTE(review): machine-mangled like the class above — attribute names
    # collapsed, locals collapsed to `__lowerCamelCase`, undefined `a` passed
    # as arguments. Kept byte-identical pending reconstruction.
    __snake_case = PegasusTokenizer
    __snake_case = PegasusTokenizerFast
    __snake_case = True
    __snake_case = True
    def _snake_case ( self: str ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        __lowerCamelCase : str = PegasusTokenizer(a , offset=0 , mask_token_sent=a , mask_token='[MASK]' )
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def _snake_case ( self: List[str] ):
        return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )
    def _snake_case ( self: Union[str, Any] , **a: Dict ):
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **a )
    def _snake_case ( self: List[str] , a: Any ):
        return ("This is a test", "This is a test")
    def _snake_case ( self: Any ):
        # python and rust tokenizers must agree on special-token handling
        __lowerCamelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        __lowerCamelCase : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
        __lowerCamelCase : Tuple = (
            'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
            ' <pad> <pad> <pad>'
        )
        __lowerCamelCase : int = rust_tokenizer([raw_input_str] , return_tensors=a , add_special_tokens=a ).input_ids[0]
        __lowerCamelCase : str = py_tokenizer([raw_input_str] , return_tensors=a , add_special_tokens=a ).input_ids[0]
        self.assertListEqual(a , a )
    @require_torch
    def _snake_case ( self: Union[str, Any] ):
        # padding/truncation shapes for source and target batches (4096 max len)
        __lowerCamelCase : Union[str, Any] = ['This is going to be way too long.' * 1000, 'short example']
        __lowerCamelCase : Tuple = ['not super long but more than 5 tokens', 'tiny']
        __lowerCamelCase : str = self._large_tokenizer(a , padding=a , truncation=a , return_tensors='pt' )
        __lowerCamelCase : Any = self._large_tokenizer(
            text_target=a , max_length=5 , padding=a , truncation=a , return_tensors='pt' )
        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(a ) == 2 # input_ids, attention_mask.
    def _snake_case ( self: Any ):
        # regression check against the original TF implementation's ids
        __lowerCamelCase : int = (
            'This is an example string that is used to test the original TF implementation against the HF'
            ' implementation'
        )
        __lowerCamelCase : Dict = self._large_tokenizer(a ).input_ids
        self.assertListEqual(
            a , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 669 | 0 |
"""simple docstring"""
def a(x: float) -> float:
    """Evaluate f(x) = 10 - x**2, the function whose root the bisection below brackets.

    Fixes the obfuscated original, whose body read `x` while the parameter was
    named `__snake_case`.
    """
    return 10 - x * x
def a(a, b):
    """Find an approximate root of `equation` in [a, b] by bisection.

    Requires equation(a) and equation(b) to have opposite signs; halves the
    bracket until it is narrower than 0.01 and returns the midpoint.

    Raises:
        ValueError: if the interval does not bracket a sign change.

    Fixes the obfuscated original: two parameters shared one name
    (SyntaxError), locals were collapsed, and arguments were undefined names.
    NOTE(review): `equation` is not defined under that name in this file (the
    helper above is also named `a`) — re-point once names are restored.
    """
    if equation(a) * equation(b) >= 0:
        raise ValueError('Wrong space!')
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Keep the half-interval whose endpoints straddle the sign change
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): `bisection` is not defined in this file — both the
    # equation and the bisection routine above are named `a`; restore the
    # names before running this demo.
    print(bisection(-2, 5))
    print(bisection(0, 6))
| 608 |
def UpperCamelCase__(density, bulk_modulus):
    """Return the speed of sound in a fluid: sqrt(bulk_modulus / density).

    Raises:
        ValueError: if density or bulk_modulus is not strictly positive.

    Fixes the obfuscated original, whose two parameters shared one name
    (SyntaxError) while the body read `density`/`bulk_modulus`.
    """
    if density <= 0:
        raise ValueError('Impossible fluid density')
    if bulk_modulus <= 0:
        raise ValueError('Impossible bulk modulus')
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 669 | 0 |
'''simple docstring'''
from __future__ import annotations
# Sample input array and its expected next-greater-element output, used by the
# __main__ demo below. NOTE(review): the demo reads the name `arr`, but both
# assignments here target `SCREAMING_SNAKE_CASE__` — restore the names.
SCREAMING_SNAKE_CASE__ = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
SCREAMING_SNAKE_CASE__ = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def lowerCamelCase(_snake_case):
    """Brute-force next-greater-element: for each item return the first later
    item that is strictly larger, or -1 if none exists.  O(n^2).

    Fixes the obfuscated original, where every local was collapsed to one
    name and the input was referenced via an undefined identifier.
    """
    result = []
    length = len(_snake_case)
    for i in range(length):
        next_greater = -1
        for j in range(i + 1, length):
            if _snake_case[i] < _snake_case[j]:
                next_greater = _snake_case[j]
                break
        result.append(next_greater)
    return result
def lowerCamelCase(_snake_case):
    """Next-greater-element via enumerate and slicing: for each item return the
    first later item that is strictly larger, or -1 if none exists.  O(n^2).

    Fixes the obfuscated original, where the per-item result variable was
    collapsed and never appended correctly.
    """
    result = []
    for i, outer in enumerate(_snake_case):
        next_greater = -1
        for inner in _snake_case[i + 1:]:
            if outer < inner:
                next_greater = inner
                break
        result.append(next_greater)
    return result
def lowerCamelCase(_snake_case):
    """Next-greater-element via a monotonic stack, scanning right-to-left.
    O(n): each element is pushed and popped at most once.

    Fixes the obfuscated original, which never wrote the found element into
    `result[index]` (the assignment target was collapsed by obfuscation).
    """
    arr_size = len(_snake_case)
    stack = []
    result = [-1] * arr_size
    for index in reversed(range(arr_size)):
        if stack:
            # discard candidates that are not strictly greater
            while stack[-1] <= _snake_case[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(_snake_case[index])
    return result
if __name__ == "__main__":
    # Demo + micro-benchmark of the three implementations.
    # NOTE(review): `arr`, `setup` and the three function names used below are
    # undefined in this file (constants were renamed to SCREAMING_SNAKE_CASE__
    # and all three functions to `lowerCamelCase`) — restore before running.
    from doctest import testmod
    from timeit import timeit
    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))
    SCREAMING_SNAKE_CASE__ = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        " next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
| 267 |
def UpperCamelCase__(n):
    """Return the number of positive divisors of `n` via prime factorization:
    the product of (multiplicity + 1) over all prime factors.

    Fixes the obfuscated original, where `n_divisors` and `i` were collapsed
    to one local name and the parameter was never used.
    """
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        # a leftover prime factor has exponent 1, contributing a factor of 2
        n_divisors *= 2
    return n_divisors
def UpperCamelCase__(divisor_count=500):
    """Return the first triangular number with more than `divisor_count` divisors.

    Project Euler #12; the threshold is parameterized (default 500 preserves
    the original behavior) so the search can be exercised with small limits.
    Uses a local divisor counter so the function is self-contained — the
    obfuscated original called an undefined `count_divisors`.
    """
    def _count_divisors(num):
        # divisor count = prod(multiplicity + 1) over the prime factorization
        divisors = 1
        factor = 2
        while factor * factor <= num:
            multiplicity = 0
            while num % factor == 0:
                num //= factor
                multiplicity += 1
            divisors *= multiplicity + 1
            factor += 1
        if num > 1:
            divisors *= 2
        return divisors

    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if _count_divisors(t_num) > divisor_count:
            break
    return t_num
if __name__ == "__main__":
    # Print the Project Euler #12 answer.
    # Fix: the original called the undefined name `solution`; the search
    # routine defined last in this file is `UpperCamelCase__`.
    print(UpperCamelCase__())
| 669 | 0 |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
# CI hub test settings: dummy user/token and the hub-ci endpoint URLs.
# NOTE(review): every assignment targets `lowercase`, but later code reads
# CI_HUB_USER, CI_HUB_USER_TOKEN, CI_HUB_ENDPOINT etc. (and CI_HUB_ENDPOINT
# is referenced two lines below its own clobbered definition) — the original
# constant names were destroyed by obfuscation and must be restored.
lowercase : Dict = '__DUMMY_TRANSFORMERS_USER__'
lowercase : Union[str, Any] = 'Dummy User'
lowercase : List[Any] = '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'
lowercase : Dict = 'https://hub-ci.huggingface.co'
lowercase : int = CI_HUB_ENDPOINT + '/datasets/{repo_id}/resolve/{revision}/{path}'
lowercase : Tuple = CI_HUB_ENDPOINT + '/{repo_id}/resolve/{revision}/(unknown)'
lowercase : Tuple = Path('~/.huggingface/hub_ci_token').expanduser()
# pytest fixtures that point huggingface_hub/datasets at the CI hub endpoint
# and manage the dummy user's token.
# NOTE(review): machine-mangled — every fixture is named `_SCREAMING_SNAKE_CASE`
# (later definitions shadow earlier ones), bodies pass the undefined name
# `SCREAMING_SNAKE_CASE__`, and one signature declares `_lowerCamelCase` twice
# (a SyntaxError). Kept byte-identical pending reconstruction.
@pytest.fixture
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict) -> Optional[int]:
    '''simple docstring'''
    # redirect file downloads to the CI hub URL template
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE" , SCREAMING_SNAKE_CASE__)
@pytest.fixture
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : str) -> str:
    '''simple docstring'''
    # point the datasets library at the CI hub endpoint/URLs
    monkeypatch.setattr("datasets.config.HF_ENDPOINT" , SCREAMING_SNAKE_CASE__)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL" , SCREAMING_SNAKE_CASE__)
@pytest.fixture
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any) -> List[str]:
    '''simple docstring'''
    # use the CI token file instead of the user's real one
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token" , SCREAMING_SNAKE_CASE__)
@pytest.fixture
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict , _lowerCamelCase : Tuple) -> Dict:
    '''simple docstring'''
    # save the dummy token for the duration of a test, then clean up
    HfFolder.save_token(SCREAMING_SNAKE_CASE__)
    yield
    HfFolder.delete_token()
@pytest.fixture(scope="session")
def _SCREAMING_SNAKE_CASE ( ) -> Tuple:
    '''simple docstring'''
    # HfApi client bound to the CI endpoint
    return HfApi(endpoint=SCREAMING_SNAKE_CASE__)
@pytest.fixture(scope="session")
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict) -> Union[str, Any]:
    '''simple docstring'''
    # swap in the CI token for the whole session, restoring any previous token
    __UpperCamelCase : List[Any] = HfFolder.get_token()
    HfFolder.save_token(SCREAMING_SNAKE_CASE__)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(SCREAMING_SNAKE_CASE__)
@pytest.fixture
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : int) -> Union[str, Any]:
    '''simple docstring'''
    def _cleanup_repo(_lowerCamelCase : Any):
        # delete a dataset repo created during a test
        hf_api.delete_repo(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ , repo_type="dataset")
    return _cleanup_repo
@pytest.fixture
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : int) -> str:
    '''simple docstring'''
    @contextmanager
    def _temporary_repo(_lowerCamelCase : Union[str, Any]):
        # yield a repo id and guarantee cleanup afterwards
        try:
            yield repo_id
        finally:
            cleanup_repo(SCREAMING_SNAKE_CASE__)
    return _temporary_repo
@pytest.fixture(scope="session")
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[int]) -> int:
'''simple docstring'''
__UpperCamelCase : Any = F'repo_txt_data-{int(time.time() * 10e3)}'
__UpperCamelCase : str = F'{CI_HUB_USER}/{repo_name}'
hf_api.create_repo(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ , repo_type="dataset" , private=SCREAMING_SNAKE_CASE__)
hf_api.upload_file(
token=SCREAMING_SNAKE_CASE__ , path_or_fileobj=str(SCREAMING_SNAKE_CASE__) , path_in_repo="data/text_data.txt" , repo_id=SCREAMING_SNAKE_CASE__ , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ , repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _SCREAMING_SNAKE_CASE(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    """Function-scoped wrapper around the session-scoped txt-data repo fixture.

    NOTE(review): the original returned a name that matched none of its
    parameters; the first parameter must be the session fixture it forwards.
    """
    return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session")
def _SCREAMING_SNAKE_CASE(hf_api, hf_token, zip_csv_with_dir_path):
    """Create a private CI-Hub dataset repo containing one zipped text archive.

    NOTE(review): undefined names reconstructed from the upstream
    ``hf_private_dataset_repo_zipped_txt_data_`` fixture — confirm the
    zip-file fixture's name.
    """
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def _SCREAMING_SNAKE_CASE(hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    """Function-scoped wrapper around the session-scoped zipped-txt repo fixture."""
    return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session")
def _SCREAMING_SNAKE_CASE(hf_api, hf_token, zip_image_path):
    """Create a private CI-Hub dataset repo containing one zipped image archive.

    NOTE(review): undefined names reconstructed from the upstream
    ``hf_private_dataset_repo_zipped_img_data_`` fixture — confirm the
    image-zip fixture's name.
    """
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def _SCREAMING_SNAKE_CASE(hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url):
    """Function-scoped wrapper around the session-scoped zipped-image repo fixture.

    NOTE(review): the original line also carried trailing dataset-row junk
    (``| 557 |``) that made it a syntax error; removed.
    """
    return hf_private_dataset_repo_zipped_img_data_
import numpy as np
class A_:
    """A single grid cell used by the A* search below.

    Attributes: ``position`` (x, y) tuple, ``parent`` back-pointer for path
    reconstruction, and the A* costs ``g`` (path cost), ``h`` (heuristic) and
    ``f`` (total).

    NOTE(review): the obfuscated original assigned these values to throwaway
    locals instead of ``self`` attributes; restored here.
    """

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, a):
        # Two cells are "equal" when they refer to the same grid coordinate.
        return self.position == a.position

    def _snake_case(self):
        """Print this cell's coordinate (debug helper)."""
        print(self.position)


# Restore the descriptive name that the rest of this file (Gridworld's
# neighbour lookup and the __main__ demo) actually references.
Cell = A_
class A_:
    """A rectangular grid world for the A* demo.

    ``w`` is the raw numpy grid; ``world_x_limit``/``world_y_limit`` are its
    bounds.

    NOTE(review): the obfuscated original lost the ``self`` attribute
    assignments and defined two methods under the same name ``_snake_case``;
    restored with explicit names, keeping ``_snake_case`` bound to the
    neighbour lookup (the only method that survived the shadowing).
    """

    def __init__(self, a=(5, 5)):
        # `a` is the (rows, cols) world size.
        self.w = np.zeros(a)
        self.world_x_limit = a[0]
        self.world_y_limit = a[1]

    def show(self):
        """Print the raw grid (debug helper)."""
        print(self.w)

    def get_neigbours(self, a):
        """Return the in-bounds 8-connected neighbour cells of cell ``a``."""
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = a.position[0]
        current_y = a.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                # Relies on the module-level `Cell` binding for the cell class.
                neighbour = Cell()
                neighbour.position = (x, y)
                neighbour.parent = a
                neighbours.append(neighbour)
        return neighbours

    # Preserve the obfuscated alias: `_snake_case` resolved to the neighbour
    # lookup in the original (the second definition shadowed the first).
    _snake_case = get_neigbours


# Restore the descriptive name referenced by the __main__ demo below.
Gridworld = A_
def UpperCamelCase__(world, start, goal):
    """A* search from cell ``start`` to cell ``goal`` on ``world``.

    Returns the list of (x, y) positions from start to goal. Cells must expose
    ``position``, ``parent`` and the cost fields ``g``/``h``/``f``; ``world``
    must expose ``get_neigbours(cell)``.

    NOTE(review): the original had three identically-named parameters (a
    SyntaxError), dropped the attribute writes for ``g``/``h``/``f`` and
    computed a degenerate heuristic ``(ya-ya)**2 + (xa-xa)**2 == 0``; fixed.
    """
    _open = [start]
    _closed = []
    current = start
    while _open:
        # Expand the queued node with the smallest total cost f.
        min_f = int(np.argmin([n.f for n in _open]))
        current = _open.pop(min_f)
        _closed.append(current)
        if current == goal:
            break
        for n in world.get_neigbours(current):
            # Skip nodes that were already expanded.
            if any(c == n for c in _closed):
                continue
            n.g = current.g + 1
            xa, ya = n.position
            xb, yb = goal.position
            # Squared Euclidean distance heuristic.
            n.h = (yb - ya) ** 2 + (xb - xa) ** 2
            n.f = n.g + n.h
            # Skip if an equal-or-better copy is already queued.
            if any(c == n and c.f <= n.f for c in _open):
                continue
            _open.append(n)
    # Walk the parent chain back from the goal to the start.
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


# Restore the descriptive name referenced by the __main__ demo below.
astar = UpperCamelCase__
if __name__ == "__main__":
    # Demo: run A* on a 5x5 grid from (0, 0) to (4, 4) and print the result.
    # NOTE(review): as written this block raises NameError — `Gridworld`,
    # `Cell`, `astar`, `world`, `start`, `goal` and `s` are never defined in
    # this file (the classes above are named `A_` and the search function
    # `UpperCamelCase__`), and every assignment below rebinds the same
    # throwaway `lowercase_` name instead of the variables actually used.
    # Verify the intended bindings against the upstream source.
    lowercase_ = Gridworld()
    # Start position and goal
    lowercase_ = Cell()
    lowercase_ = (0, 0)
    lowercase_ = Cell()
    lowercase_ = (4, 4)
    print(F"""path from {start.position} to {goal.position}""")
    lowercase_ = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        lowercase_ = 1
    print(world.w)
# ---------------------------------------------------------------------------
from random import randint, random
def a_(
    number_of_cells,
    frequency,
    initial_speed,
    random_frequency=False,
    random_speed=False,
    max_speed=5,
):
    """Build a one-lane highway as a list containing one row of cells.

    Each cell holds a car's speed, or -1 for an empty cell. Cars are placed
    every ``frequency`` cells (or at random gaps when ``random_frequency``),
    with ``initial_speed`` (or a random speed when ``random_speed``).
    ``frequency`` must be >= 1 or the placement loop never advances.

    NOTE(review): the obfuscated original had six identically-named parameters
    (a SyntaxError) and dropped the ``highway[0][i] = ...`` cell assignment.
    """
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)  # speeds cannot be negative
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


# Restore the descriptive name used by the upstream module.
construct_highway = a_
def a_(highway_now, car_index):
    """Return the number of empty cells in front of ``highway_now[car_index]``.

    Wraps around the end of the highway (the track is circular) by recursing
    from index -1 when no car is found ahead.

    NOTE(review): the obfuscated original bound its parameters to duplicate
    names and referenced them via undefined identifiers; reconstructed.
    """
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


# Restore the descriptive name that `update` below already calls.
get_distance = a_
def a_(highway_now, probability, max_speed):
    """One Nagel-Schreckenberg update step: return the next speeds in place.

    For each car: accelerate by 1 (capped at ``max_speed``), brake to avoid
    the car ahead, then randomly slow down with the given ``probability``.

    NOTE(review): the obfuscated original assigned every intermediate to a
    throwaway local, so ``next_highway`` was never filled; reconstructed.
    """
    number_of_cells = len(highway_now)
    # Before calculations, the next highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            distance_ahead = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], distance_ahead)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


# Restore the descriptive name that `simulate` below already calls.
update = a_
def a_(highway, number_of_update, probability, max_speed):
    """Run ``number_of_update`` simulation steps, appending each new state.

    ``highway`` is a list of highway rows; each step computes the next speeds
    via ``update`` and then moves every car forward by its speed, wrapping
    around the circular track.

    NOTE(review): the obfuscated original never wrote the computed speed into
    ``real_next_speeds`` and lost the wrapped position assignment; fixed.
    """
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway


# Restore the descriptive name used by the upstream module.
simulate = a_
if __name__ == "__main__":
    # Run the module's doctests when this file is executed directly.
    import doctest
    doctest.testmod()
# ---------------------------------------------------------------------------
import math
from datetime import datetime, timedelta
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : Tuple = year % 19
__lowerCamelCase : int = year % 4
__lowerCamelCase : Any = year % 7
__lowerCamelCase : Dict = math.floor(year / 100 )
__lowerCamelCase : str = math.floor((13 + 8 * leap_day_inhibits) / 25 )
__lowerCamelCase : Optional[int] = leap_day_inhibits / 4
__lowerCamelCase : str = (
15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 30
__lowerCamelCase : Optional[Any] = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
__lowerCamelCase : Optional[int] = (19 * metonic_cycle + secular_moon_shift) % 30
# PHM -> Paschal Full Moon
__lowerCamelCase : Tuple = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 29 and days_from_phm_to_sunday == 6:
return datetime(SCREAMING_SNAKE_CASE__ , 4 , 19 )
elif days_to_add == 28 and days_from_phm_to_sunday == 6:
return datetime(SCREAMING_SNAKE_CASE__ , 4 , 18 )
else:
return datetime(SCREAMING_SNAKE_CASE__ , 3 , 22 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
    # Demo: report the Easter date for a handful of years.
    # NOTE(review): the original rebound a throwaway `lowercase_` name but the
    # f-string used `tense`, and it called the undefined name `gauss_easter`;
    # both fixed (the function above is defined as `UpperCamelCase__`).
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {UpperCamelCase__(year)}")
# ---------------------------------------------------------------------------
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
# Module-level RNG used when a test doesn't supply its own.
global_rng = random.Random()
# Keep the obfuscated binding in case anything else references it.
__lowerCAmelCase = global_rng


def UpperCAmelCase_(shape, scale=1.0, rng=None, name=None):
    """Create a ``shape[0] x shape[1]`` nested list of random floats in
    ``[0, scale)``.

    NOTE(review): the obfuscated original had four identically-named
    parameters (a SyntaxError) and referenced the undefined ``global_rng``;
    reconstructed from the upstream test helper. ``name`` is unused but kept
    for signature compatibility.
    """
    if rng is None:
        rng = global_rng
    values = []
    for _batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values


# Restore the descriptive name that the test classes below already call.
floats_list = UpperCAmelCase_
@require_torch
@require_torchaudio
class UpperCAmelCase__(unittest.TestCase):
    """Config holder that builds WhisperFeatureExtractor kwargs and dummy inputs.

    NOTE(review): the obfuscated original had duplicate ``_a`` parameters (a
    SyntaxError), collapsed every ``self.*`` assignment into a throwaway local
    and defined both helper methods under the same name ``__lowercase``;
    reconstructed from the upstream transformers test file.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Step between successive input lengths so a batch spans min..max.
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        """Return the kwargs used to construct a WhisperFeatureExtractor."""
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Create a batch of dummy float waveforms (optionally equal-length /
        converted to numpy arrays)."""

        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


# Restore the descriptive name referenced by the test class below.
WhisperFeatureExtractionTester = UpperCAmelCase__
@require_torch
@require_torchaudio
# NOTE(review): this class is systematically corrupted by obfuscation — every
# method is named `__lowercase` (so only the last definition survives), and
# every intermediate value is assigned to a throwaway `_a` local while the
# following lines reference the original descriptive names (e.g.
# `feat_extract_first`, `input_features`), which are therefore undefined.
# A confident reconstruction should be taken from the upstream
# transformers `tests/models/whisper/test_feature_extraction_whisper.py`.
class UpperCAmelCase__ ( __UpperCamelCase , unittest.TestCase ):
    """Test-suite for WhisperFeatureExtractor (save/load round-trips, batching,
    truncation, dtype handling, integration values)."""
    __UpperCAmelCase : Union[str, Any] = WhisperFeatureExtractor if is_speech_available() else None
    # setUp: builds the tester/config holder.
    def __lowercase ( self : Union[str, Any] ):
        '''Create the feature-extraction tester used by the tests below.'''
        _a : Optional[Any] = WhisperFeatureExtractionTester(self )
    # save_pretrained / from_pretrained round-trip.
    def __lowercase ( self : Union[str, Any] ):
        '''Round-trip the extractor through save_pretrained/from_pretrained.'''
        _a : Dict = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            _a : Tuple = feat_extract_first.save_pretrained(_a )[0]
            check_json_file_has_correct_format(_a )
            _a : Optional[Any] = self.feature_extraction_class.from_pretrained(_a )
            _a : List[Any] = feat_extract_first.to_dict()
            _a : Any = feat_extract_second.to_dict()
            _a : Dict = feat_extract_first.mel_filters
            _a : str = feat_extract_second.mel_filters
            self.assertTrue(np.allclose(_a ,_a ) )
            self.assertEqual(_a ,_a )
    # to_json_file / from_json_file round-trip.
    def __lowercase ( self : str ):
        '''Round-trip the extractor through its JSON serialization.'''
        _a : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            _a : Tuple = os.path.join(_a ,'feat_extract.json' )
            feat_extract_first.to_json_file(_a )
            _a : str = self.feature_extraction_class.from_json_file(_a )
            _a : Union[str, Any] = feat_extract_first.to_dict()
            _a : List[Any] = feat_extract_second.to_dict()
            _a : Optional[Any] = feat_extract_first.mel_filters
            _a : Optional[Any] = feat_extract_second.mel_filters
            self.assertTrue(np.allclose(_a ,_a ) )
            self.assertEqual(_a ,_a )
    # __call__ behaviour: shapes, batching, truncation.
    def __lowercase ( self : Optional[Any] ):
        '''Exercise feature extraction on lists/arrays, batched and truncated.'''
        _a : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        _a : int = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )]
        _a : str = [np.asarray(_a ) for speech_input in speech_inputs]
        # Test feature size
        _a : Union[str, Any] = feature_extractor(_a ,padding='max_length' ,return_tensors='np' ).input_features
        self.assertTrue(input_features.ndim == 3 )
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
        # Test not batched input
        _a : Any = feature_extractor(speech_inputs[0] ,return_tensors='np' ).input_features
        _a : Optional[Any] = feature_extractor(np_speech_inputs[0] ,return_tensors='np' ).input_features
        self.assertTrue(np.allclose(_a ,_a ,atol=1E-3 ) )
        # Test batched
        _a : str = feature_extractor(_a ,return_tensors='np' ).input_features
        _a : List[Any] = feature_extractor(_a ,return_tensors='np' ).input_features
        for enc_seq_a, enc_seq_a in zip(_a ,_a ):
            self.assertTrue(np.allclose(_a ,_a ,atol=1E-3 ) )
        # Test 2-D numpy arrays are batched.
        _a : int = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        _a : Tuple = np.asarray(_a )
        _a : Any = feature_extractor(_a ,return_tensors='np' ).input_features
        _a : Optional[Any] = feature_extractor(_a ,return_tensors='np' ).input_features
        for enc_seq_a, enc_seq_a in zip(_a ,_a ):
            self.assertTrue(np.allclose(_a ,_a ,atol=1E-3 ) )
        # Test truncation required
        _a : int = [floats_list((1, x) )[0] for x in range(200 ,(feature_extractor.n_samples + 500) ,200 )]
        _a : Optional[Any] = [np.asarray(_a ) for speech_input in speech_inputs]
        _a : List[str] = [x[: feature_extractor.n_samples] for x in speech_inputs]
        _a : str = [np.asarray(_a ) for speech_input in speech_inputs_truncated]
        _a : int = feature_extractor(_a ,return_tensors='np' ).input_features
        _a : List[Any] = feature_extractor(_a ,return_tensors='np' ).input_features
        for enc_seq_a, enc_seq_a in zip(_a ,_a ):
            self.assertTrue(np.allclose(_a ,_a ,atol=1E-3 ) )
    # Double precision inputs should be padded down to float32.
    def __lowercase ( self : Optional[int] ):
        '''Check pad() returns float32 features for float64 inputs.'''
        import torch
        _a : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        _a : Optional[int] = np.random.rand(100 ,32 ).astype(np.floataa )
        _a : Optional[Any] = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            _a : Optional[Any] = feature_extractor.pad([{'input_features': inputs}] ,return_tensors='np' )
            self.assertTrue(np_processed.input_features.dtype == np.floataa )
            _a : Dict = feature_extractor.pad([{'input_features': inputs}] ,return_tensors='pt' )
            self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
    # Helper: load audio samples from the dummy LibriSpeech dataset.
    def __lowercase ( self : Tuple ,_a : List[str] ):
        '''Return `num_samples` raw audio arrays from the dummy dataset.'''
        _a : Dict = load_dataset('hf-internal-testing/librispeech_asr_dummy' ,'clean' ,split='validation' )
        # automatic decoding with librispeech
        _a : List[Any] = ds.sort('id' ).select(range(_a ) )[:num_samples]['audio']
        return [x["array"] for x in speech_samples]
    # Integration test against precomputed reference values.
    def __lowercase ( self : Dict ):
        '''Compare extracted features against golden reference values.'''
        _a : List[str] = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ] )
        # fmt: on
        _a : Union[str, Any] = self._load_datasamples(1 )
        _a : str = WhisperFeatureExtractor()
        _a : Dict = feature_extractor(_a ,return_tensors='pt' ).input_features
        self.assertEqual(input_features.shape ,(1, 80, 3000) )
        self.assertTrue(torch.allclose(input_features[0, 0, :30] ,_a ,atol=1E-4 ) )
    # Zero-mean / unit-variance normalization helper.
    def __lowercase ( self : Optional[Any] ):
        '''Check zero_mean_unit_var_norm produces ~0 mean and ~1 variance.'''
        _a : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        _a : Optional[Any] = self._load_datasamples(1 )[0]
        _a : Optional[Any] = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535  # Rescale to [0, 65535] to show issue
        _a : Optional[int] = feat_extract.zero_mean_unit_var_norm([audio] ,attention_mask=_a )[0]
        self.assertTrue(np.all(np.mean(_a ) < 1E-3 ) )
        self.assertTrue(np.all(np.abs(np.var(_a ) - 1 ) < 1E-3 ) )
# ---------------------------------------------------------------------------
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class A_(SchedulerMixin, ConfigMixin):
    """Variance-preserving (VP) SDE scheduler (score_sde_pytorch style).

    NOTE(review): the obfuscated original inherited from the undefined
    ``__UpperCamelCase``, had duplicate ``a`` parameters in ``__init__``
    (a SyntaxError), defined both ``set_timesteps`` and ``step_pred`` under
    the same name ``_snake_case``, and dropped every attribute/variable
    binding; reconstructed from the upstream diffusers scheduler.
    """

    # Solver order expected by diffusers pipelines; keep the obfuscated
    # attribute as well for compatibility.
    order = 1
    __snake_case = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        # Populated lazily by set_timesteps().
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        """Create the continuous timestep schedule from 1 down to sampling_eps."""
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        """One reverse-SDE predictor step; returns ``(x, x_mean)``."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std
        # compute the reverse drift/diffusion for one Euler step of size dt
        dt = -1.0 / len(self.timesteps)
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise
        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
# ---------------------------------------------------------------------------
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
# NOTE(review): this tester class is systematically corrupted by obfuscation —
# `__init__` declares dozens of parameters all named `A` (a SyntaxError in
# Python) and binds every value to the throwaway local `_UpperCAmelCase`
# instead of `self.<attr>`, while later lines read the descriptive attribute
# names. A confident reconstruction should be taken from the upstream
# transformers `tests/models/layoutlmv3/test_modeling_tf_layoutlmv3.py`.
class _UpperCAmelCase :
    """Config/tensor builder for the TF LayoutLMv3 model tests: prepares a
    LayoutLMvaConfig plus dummy input_ids, bboxes, pixel values, masks and
    labels, and runs create-and-check helpers for each head."""
    def __init__( self , A , A=2 , A=3 , A=4 , A=2 , A=7 , A=True , A=True , A=True , A=True , A=9_9 , A=3_6 , A=2 , A=4 , A=3_7 , A="gelu" , A=0.1 , A=0.1 , A=5_1_2 , A=1_6 , A=2 , A=0.02 , A=6 , A=6 , A=3 , A=4 , A=None , A=1_0_0_0 , ) -> Optional[Any]:
        _UpperCAmelCase : List[str] = parent
        _UpperCAmelCase : Optional[Any] = batch_size
        _UpperCAmelCase : Optional[int] = num_channels
        _UpperCAmelCase : str = image_size
        _UpperCAmelCase : int = patch_size
        _UpperCAmelCase : List[str] = is_training
        _UpperCAmelCase : Dict = use_input_mask
        _UpperCAmelCase : Any = use_token_type_ids
        _UpperCAmelCase : List[str] = use_labels
        _UpperCAmelCase : str = vocab_size
        _UpperCAmelCase : List[Any] = hidden_size
        _UpperCAmelCase : List[Any] = num_hidden_layers
        _UpperCAmelCase : Any = num_attention_heads
        _UpperCAmelCase : List[Any] = intermediate_size
        _UpperCAmelCase : List[Any] = hidden_act
        _UpperCAmelCase : Any = hidden_dropout_prob
        _UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
        _UpperCAmelCase : Dict = max_position_embeddings
        _UpperCAmelCase : Tuple = type_vocab_size
        _UpperCAmelCase : int = type_sequence_label_size
        _UpperCAmelCase : List[str] = initializer_range
        _UpperCAmelCase : List[str] = coordinate_size
        _UpperCAmelCase : int = shape_size
        _UpperCAmelCase : Union[str, Any] = num_labels
        _UpperCAmelCase : int = num_choices
        _UpperCAmelCase : int = scope
        _UpperCAmelCase : Any = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        _UpperCAmelCase : Any = text_seq_length
        _UpperCAmelCase : Optional[Any] = (image_size // patch_size) ** 2 + 1
        _UpperCAmelCase : Any = self.text_seq_length + self.image_seq_length
    # prepare_config_and_inputs: dummy ids/bboxes/pixels/masks/labels + config.
    def __lowerCAmelCase ( self ) -> Optional[Any]:
        _UpperCAmelCase : Any = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        _UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        _UpperCAmelCase : int = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    _UpperCAmelCase : List[str] = bbox[i, j, 3]
                    _UpperCAmelCase : str = bbox[i, j, 1]
                    _UpperCAmelCase : Dict = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    _UpperCAmelCase : Tuple = bbox[i, j, 2]
                    _UpperCAmelCase : Any = bbox[i, j, 0]
                    _UpperCAmelCase : List[str] = tmp_coordinate
        _UpperCAmelCase : str = tf.constant(A )
        _UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        _UpperCAmelCase : Any = None
        if self.use_input_mask:
            _UpperCAmelCase : int = random_attention_mask([self.batch_size, self.text_seq_length] )
        _UpperCAmelCase : Tuple = None
        if self.use_token_type_ids:
            _UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        _UpperCAmelCase : Dict = None
        _UpperCAmelCase : Union[str, Any] = None
        if self.use_labels:
            _UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            _UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        _UpperCAmelCase : Dict = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    # create_and_check_model: base model with text+image, text-only, image-only.
    def __lowerCAmelCase ( self , A , A , A , A , A , A ) -> Dict:
        _UpperCAmelCase : Optional[Any] = TFLayoutLMvaModel(config=A )
        # text + image
        _UpperCAmelCase : Optional[Any] = model(A , pixel_values=A , training=A )
        _UpperCAmelCase : int = model(
            A , bbox=A , pixel_values=A , attention_mask=A , token_type_ids=A , training=A , )
        _UpperCAmelCase : List[Any] = model(A , bbox=A , pixel_values=A , training=A )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        _UpperCAmelCase : List[Any] = model(A , training=A )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        _UpperCAmelCase : Optional[Any] = model({'''pixel_values''': pixel_values} , training=A )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
    # create_and_check_for_sequence_classification head.
    def __lowerCAmelCase ( self , A , A , A , A , A , A , A ) -> int:
        _UpperCAmelCase : List[str] = self.num_labels
        _UpperCAmelCase : str = TFLayoutLMvaForSequenceClassification(config=A )
        _UpperCAmelCase : int = model(
            A , bbox=A , pixel_values=A , attention_mask=A , token_type_ids=A , labels=A , training=A , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    # create_and_check_for_token_classification head.
    def __lowerCAmelCase ( self , A , A , A , A , A , A , A ) -> Dict:
        _UpperCAmelCase : Union[str, Any] = self.num_labels
        _UpperCAmelCase : Any = TFLayoutLMvaForTokenClassification(config=A )
        _UpperCAmelCase : Optional[Any] = model(
            A , bbox=A , pixel_values=A , attention_mask=A , token_type_ids=A , labels=A , training=A , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
    # create_and_check_for_question_answering head.
    def __lowerCAmelCase ( self , A , A , A , A , A , A , A ) -> Union[str, Any]:
        _UpperCAmelCase : List[Any] = 2
        _UpperCAmelCase : Any = TFLayoutLMvaForQuestionAnswering(config=A )
        _UpperCAmelCase : Any = model(
            A , bbox=A , pixel_values=A , attention_mask=A , token_type_ids=A , start_positions=A , end_positions=A , training=A , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    # prepare_config_and_inputs_for_common: dict form used by the mixin.
    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        _UpperCAmelCase : str = self.prepare_config_and_inputs()
        (_UpperCAmelCase) : List[Any] = config_and_inputs
        _UpperCAmelCase : Tuple = {
            'input_ids': input_ids,
            'bbox': bbox,
            'pixel_values': pixel_values,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_tf
class _UpperCAmelCase ( __UpperCamelCase ,__UpperCamelCase ,unittest.TestCase ):
'''simple docstring'''
a__ =(
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
a__ =(
{'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
a__ =False
a__ =False
a__ =False
def __lowerCAmelCase ( self , A , A , A , A , A ) -> List[str]:
return True
def __lowerCAmelCase ( self , A , A , A=False ) -> List[str]:
_UpperCAmelCase : List[str] = copy.deepcopy(A )
if model_class in get_values(A ):
_UpperCAmelCase : Tuple = {
k: tf.tile(tf.expand_dims(A , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(A , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(A ):
_UpperCAmelCase : Any = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(A ):
_UpperCAmelCase : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
_UpperCAmelCase : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(A ):
_UpperCAmelCase : str = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(A ):
_UpperCAmelCase : Dict = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_UpperCAmelCase : int = TFLayoutLMvaModelTester(self )
_UpperCAmelCase : str = ConfigTester(self , config_class=A , hidden_size=3_7 )
def __lowerCAmelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> Any:
_UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : int = model_class(A )
if getattr(A , '''hf_compute_loss''' , A ):
# The number of elements in the loss should be the same as the number of elements in the label
_UpperCAmelCase : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , A , return_labels=A )
_UpperCAmelCase : int = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=A )[0]
]
_UpperCAmelCase : Dict = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
_UpperCAmelCase : Optional[int] = self._prepare_for_class(inputs_dict.copy() , A , return_labels=A )
_UpperCAmelCase : Dict = prepared_for_class.pop('''input_ids''' )
_UpperCAmelCase : str = model(A , **A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
_UpperCAmelCase : List[Any] = self._prepare_for_class(inputs_dict.copy() , A , return_labels=A )
_UpperCAmelCase : List[str] = prepared_for_class.pop('''input_ids''' )
if "labels" in prepared_for_class:
_UpperCAmelCase : int = prepared_for_class['labels'].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
_UpperCAmelCase : Tuple = -1_0_0
_UpperCAmelCase : Tuple = tf.convert_to_tensor(A )
_UpperCAmelCase : Tuple = model(A , **A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
_UpperCAmelCase : int = self._prepare_for_class(inputs_dict.copy() , A , return_labels=A )
_UpperCAmelCase : str = model(A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
_UpperCAmelCase : str = self._prepare_for_class(inputs_dict.copy() , A , return_labels=A )
# Get keys that were added with the _prepare_for_class function
_UpperCAmelCase : Optional[Any] = prepared_for_class.keys() - inputs_dict.keys()
_UpperCAmelCase : List[Any] = inspect.signature(model.call ).parameters
_UpperCAmelCase : List[str] = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
_UpperCAmelCase : Optional[int] = {0: 'input_ids'}
for label_key in label_keys:
_UpperCAmelCase : Dict = signature_names.index(A )
_UpperCAmelCase : str = label_key
_UpperCAmelCase : List[str] = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
_UpperCAmelCase : Optional[int] = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
_UpperCAmelCase : Optional[int] = prepared_for_class[value]
_UpperCAmelCase : Any = tuple(A )
# Send to model
_UpperCAmelCase : int = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
    def __lowerCAmelCase ( self ) -> Optional[int]:
        # Runs the base-model shape check.
        # NOTE(review): the unpacking target below was mangled down to a single
        # name and the call passes an undefined name ``A`` six times — upstream
        # this unpacks config/input_ids/bbox/... and forwards them; restore
        # from the original test file before running.
        (
            _UpperCAmelCase
        ) : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(A , A , A , A , A , A )
    def __lowerCAmelCase ( self ) -> Dict:
        # Repeats the base-model check for each position-embedding variant.
        # NOTE(review): the loop variable shadows the builtin ``type``, the
        # unpacking target is mangled, and ``A`` is undefined — restore the
        # concrete arguments from the upstream test file before running.
        (
            _UpperCAmelCase
        ) : str = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            # presumably assigns ``config.position_embedding_type`` upstream
            _UpperCAmelCase : Union[str, Any] = type
            self.model_tester.create_and_check_model(A , A , A , A , A , A )
    def __lowerCAmelCase ( self ) -> Dict:
        # Sequence-classification head shape check.
        # NOTE(review): mangled unpack target and undefined ``A`` arguments —
        # restore from the upstream test file before running.
        (
            _UpperCAmelCase
        ) : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            A , A , A , A , A , A , A )
    def __lowerCAmelCase ( self ) -> str:
        # Token-classification head shape check.
        # NOTE(review): mangled unpack target and undefined ``A`` arguments —
        # restore from the upstream test file before running.
        (
            _UpperCAmelCase
        ) : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            A , A , A , A , A , A , A )
    def __lowerCAmelCase ( self ) -> Any:
        # Question-answering head shape check.
        # NOTE(review): mangled unpack target and undefined ``A`` arguments —
        # restore from the upstream test file before running.
        (
            _UpperCAmelCase
        ) : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            A , A , A , A , A , A , A )
    @slow
    def __lowerCAmelCase ( self ) -> Any:
        # Smoke test: loading the first published checkpoint must succeed.
        # NOTE(review): both calls below pass an undefined ``A`` where
        # ``model_name`` / the loaded model are expected upstream.
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _UpperCAmelCase : Dict = TFLayoutLMvaModel.from_pretrained(A )
            self.assertIsNotNone(A )
def lowerCamelCase_ ():
    """Load the standard COCO cats fixture image used by the integration tests.

    Fix: the original bound the opened image to a mangled temporary name and
    then returned an undefined name ``image``; return the opened image
    directly instead.
    """
    return Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
    '''Integration test: run pretrained TF LayoutLMv3-base on a fixture image
    and verify the output shape plus a slice of the last hidden state.

    NOTE(review): many call sites below pass an undefined name ``A`` where
    concrete arguments belong (``apply_ocr=False``, the prepared tensors,
    ``training=False``), and locals such as ``model``/``outputs`` were mangled
    into throwaway names — restore from the upstream test before running.
    '''
    @cached_property
    def __lowerCAmelCase ( self ) -> str:
        # Image processor fixture; None when vision extras are missing.
        return LayoutLMvaImageProcessor(apply_ocr=A ) if is_vision_available() else None
    @slow
    def __lowerCAmelCase ( self ) -> List[Any]:
        _UpperCAmelCase : Tuple = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' )
        _UpperCAmelCase : Union[str, Any] = self.default_image_processor
        _UpperCAmelCase : List[Any] = prepare_img()
        _UpperCAmelCase : str = image_processor(images=A , return_tensors='''tf''' ).pixel_values
        _UpperCAmelCase : Union[str, Any] = tf.constant([[1, 2]] )
        _UpperCAmelCase : str = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
        # forward pass
        _UpperCAmelCase : int = model(input_ids=A , bbox=A , pixel_values=A , training=A )
        # verify the logits
        _UpperCAmelCase : Optional[int] = (1, 1_9_9, 7_6_8)
        self.assertEqual(outputs.last_hidden_state.shape , A )
        _UpperCAmelCase : Any = tf.constant(
            [[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]] )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , A , atol=1E-4 ) )
| 506 |
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
    """Return the first ``n`` Hamming numbers (numbers of the form 2^i * 3^j * 5^k).

    Uses the classic three-pointer merge: each pointer tracks the next
    candidate obtained by multiplying an already-generated Hamming number by
    2, 3 or 5; the smallest candidate is appended each step.

    :param SCREAMING_SNAKE_CASE__: how many terms to produce (coerced to int, must be >= 1)
    :return: list of the first ``n`` Hamming numbers, starting at 1
    :raises ValueError: if fewer than one term is requested

    Fixes over the original: the error path built an exception but raised an
    undefined name ``my_error``, and the pointer variables ``i``/``j``/``k``
    and loop counter were lost to name mangling.
    """
    n_element = int(SCREAMING_SNAKE_CASE__ )
    if n_element < 1:
        raise ValueError('a should be a positive number' )
    hamming_list = [1]
    # Pointers into hamming_list for the next *2, *3 and *5 candidates.
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        # Advance each pointer past candidates already at or below the tail.
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
        index += 1
    return hamming_list
if __name__ == "__main__":
    # Interactive driver for the Hamming-number generator defined above.
    # Fix: the original called an undefined name ``hamming`` on an undefined
    # variable ``n``; wire the prompt result into ``UpperCamelCase__`` instead.
    lowercase_ = input('Enter the last number (nth term) of the Hamming Number Series: ')
    print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
    hamming_numbers = UpperCamelCase__(int(lowercase_))
    print('-----------------------------------------------------')
    print(F"""The list with nth numbers is: {hamming_numbers}""")
    print('-----------------------------------------------------')
| 669 | 0 |
"""simple docstring"""
_UpperCamelCase = {
'Pillow': 'Pillow',
'accelerate': 'accelerate>=0.11.0',
'compel': 'compel==0.1.8',
'black': 'black~=23.1',
'datasets': 'datasets',
'filelock': 'filelock',
'flax': 'flax>=0.4.1',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.13.2',
'requests-mock': 'requests-mock==1.10.0',
'importlib_metadata': 'importlib_metadata',
'invisible-watermark': 'invisible-watermark',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2',
'jaxlib': 'jaxlib>=0.1.65',
'Jinja2': 'Jinja2',
'k-diffusion': 'k-diffusion>=0.0.12',
'torchsde': 'torchsde',
'note_seq': 'note_seq',
'librosa': 'librosa',
'numpy': 'numpy',
'omegaconf': 'omegaconf',
'parameterized': 'parameterized',
'protobuf': 'protobuf>=3.20.3,<4',
'pytest': 'pytest',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'ruff': 'ruff>=0.0.241',
'safetensors': 'safetensors',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'scipy': 'scipy',
'onnx': 'onnx',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'tensorboard': 'tensorboard',
'torch': 'torch>=1.4',
'torchvision': 'torchvision',
'transformers': 'transformers>=4.25.1',
'urllib3': 'urllib3<=2.0.0',
}
| 179 |
import unittest
from knapsack import greedy_knapsack as kp
class A_ ( unittest.TestCase ):
    '''Unit tests for the greedy-knapsack helper ``kp.calc_profit``.

    NOTE(review): every ``assertRaisesRegex`` call below passes an undefined
    name ``a`` where the expected exception type and the triggering callable
    belong, and the happy-path test forwards ``a`` instead of the three lists
    built above it — these tests cannot run as written; restore the concrete
    arguments from the upstream test file.
    '''
    def _snake_case ( self: List[Any] ):
        # Happy path: profits/weights with ample capacity should yield 210.
        __lowerCamelCase : str = [10, 20, 30, 40, 50, 60]
        __lowerCamelCase : List[str] = [2, 4, 6, 8, 10, 12]
        __lowerCamelCase : Tuple = 100
        self.assertEqual(kp.calc_profit(a , a , a ) , 210 )
    def _snake_case ( self: str ):
        # Zero/negative capacity must be rejected.
        self.assertRaisesRegex(a , 'max_weight must greater than zero.' )
    def _snake_case ( self: List[str] ):
        # Negative weights must be rejected.
        self.assertRaisesRegex(a , 'Weight can not be negative.' )
    def _snake_case ( self: Dict ):
        # Negative profits must be rejected.
        self.assertRaisesRegex(a , 'Profit can not be negative.' )
    def _snake_case ( self: List[str] ):
        self.assertRaisesRegex(a , 'max_weight must greater than zero.' )
    def _snake_case ( self: Any ):
        # Mismatched profit/weight list lengths must be rejected.
        self.assertRaisesRegex(
            a , 'The length of profit and weight must be same.' )
# Allow running this test module directly with ``python <file>.py``.
if __name__ == "__main__":
    unittest.main()
| 669 | 0 |
'''simple docstring'''
def snake_case_ ( base , exponent ):
    """Compute ``base ** |exponent|`` by recursive repeated squaring.

    ``int(exponent / 2)`` truncates toward zero, so a negative exponent still
    terminates and yields ``base ** abs(exponent)``; the sign of the exponent
    is handled by the wrapper function defined after this one in the file.

    :param base: number to exponentiate
    :param exponent: integer exponent
    :return: ``base ** abs(exponent)``

    Fixes over the original: it declared two parameters with the same name
    (a SyntaxError) and recursed through an undefined name ``actual_power``;
    it now recurses through itself.
    """
    if exponent == 0:
        return 1
    # One recursive call instead of two identical ones: O(log n) total work.
    half = snake_case_(base , int(exponent / 2))
    if (exponent % 2) == 0:
        return half * half
    return base * half * half
def snake_case_ ( base , exponent ):
    """Return ``base ** exponent``, supporting negative integer exponents.

    :param base: number to exponentiate
    :param exponent: integer exponent (may be negative)
    :return: ``base ** exponent`` (a float for negative exponents)

    Fixes over the original: it declared two parameters with the same name
    (a SyntaxError) and called an undefined name ``actual_power``; the
    repeated-squaring helper is nested here so the function is self-contained.
    """
    def _actual_power(b , e ):
        # b ** |e| by repeated squaring; int(e / 2) truncates toward zero,
        # so negative exponents terminate and behave like their magnitude.
        if e == 0:
            return 1
        half = _actual_power(b , int(e / 2))
        return half * half if (e % 2) == 0 else b * half * half

    if exponent < 0:
        return 1 / _actual_power(base , exponent )
    return _actual_power(base , exponent )
if __name__ == "__main__":
    # Demo: (-2) ** -3 == -0.125.
    # Fix: the original called an undefined name ``power``; the public entry
    # point defined above is ``snake_case_``.
    print(snake_case_(-2, -3))
| 274 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A_ :
    '''Config-and-input builder for the TF LayoutLMv3 tests below.

    Builds a small ``LayoutLMvaConfig`` plus random input ids, bounding boxes,
    pixel values and optional masks/labels, and provides ``create_and_check_*``
    helpers that assert output shapes for each task head.

    NOTE(review): identifiers in this file are machine-mangled — every
    ``__init__`` parameter is literally named ``a`` (a SyntaxError as written)
    and locals are all ``__lowerCamelCase``; the attribute assignments below
    show the intended parameter meanings (parent, batch_size, num_channels,
    image_size, patch_size, ...). Restore real names from the upstream test
    file before running.
    '''
    def __init__( self: Optional[int] , a: Union[str, Any] , a: Union[str, Any]=2 , a: str=3 , a: Any=4 , a: Union[str, Any]=2 , a: Tuple=7 , a: int=True , a: Tuple=True , a: List[str]=True , a: Union[str, Any]=True , a: str=99 , a: Tuple=36 , a: int=2 , a: Dict=4 , a: Union[str, Any]=37 , a: List[str]="gelu" , a: List[Any]=0.1 , a: Optional[int]=0.1 , a: Dict=512 , a: Union[str, Any]=16 , a: str=2 , a: int=0.0_2 , a: Optional[Any]=6 , a: Optional[int]=6 , a: Dict=3 , a: Optional[Any]=4 , a: Optional[Any]=None , a: Dict=1000 , ):
        __lowerCamelCase : List[str] = parent
        __lowerCamelCase : Optional[Any] = batch_size
        __lowerCamelCase : Optional[int] = num_channels
        __lowerCamelCase : str = image_size
        __lowerCamelCase : int = patch_size
        __lowerCamelCase : List[str] = is_training
        __lowerCamelCase : Dict = use_input_mask
        __lowerCamelCase : Any = use_token_type_ids
        __lowerCamelCase : List[str] = use_labels
        __lowerCamelCase : str = vocab_size
        __lowerCamelCase : List[Any] = hidden_size
        __lowerCamelCase : List[Any] = num_hidden_layers
        __lowerCamelCase : Any = num_attention_heads
        __lowerCamelCase : List[Any] = intermediate_size
        __lowerCamelCase : List[Any] = hidden_act
        __lowerCamelCase : Any = hidden_dropout_prob
        __lowerCamelCase : Optional[int] = attention_probs_dropout_prob
        __lowerCamelCase : Dict = max_position_embeddings
        __lowerCamelCase : Tuple = type_vocab_size
        __lowerCamelCase : int = type_sequence_label_size
        __lowerCamelCase : List[str] = initializer_range
        __lowerCamelCase : List[str] = coordinate_size
        __lowerCamelCase : int = shape_size
        __lowerCamelCase : Union[str, Any] = num_labels
        __lowerCamelCase : int = num_choices
        __lowerCamelCase : int = scope
        __lowerCamelCase : Any = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        __lowerCamelCase : Any = text_seq_length
        __lowerCamelCase : Optional[Any] = (image_size // patch_size) ** 2 + 1
        __lowerCamelCase : Any = self.text_seq_length + self.image_seq_length
    def _snake_case ( self: List[str] ):
        # Builds config + random inputs; returns an 8-tuple consumed by the
        # create_and_check_* helpers below.
        __lowerCamelCase : Any = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        __lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        __lowerCamelCase : int = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                # Swap coordinates so x0 <= x1 and y0 <= y1 for every box.
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    __lowerCamelCase : List[str] = bbox[i, j, 3]
                    __lowerCamelCase : str = bbox[i, j, 1]
                    __lowerCamelCase : Dict = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    __lowerCamelCase : Tuple = bbox[i, j, 2]
                    __lowerCamelCase : Any = bbox[i, j, 0]
                    __lowerCamelCase : List[str] = tmp_coordinate
        __lowerCamelCase : str = tf.constant(a )
        __lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __lowerCamelCase : Any = None
        if self.use_input_mask:
            __lowerCamelCase : int = random_attention_mask([self.batch_size, self.text_seq_length] )
        __lowerCamelCase : Tuple = None
        if self.use_token_type_ids:
            __lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        __lowerCamelCase : Dict = None
        __lowerCamelCase : Union[str, Any] = None
        if self.use_labels:
            __lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __lowerCamelCase : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        __lowerCamelCase : Dict = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def _snake_case ( self: Tuple , a: List[Any] , a: Any , a: List[str] , a: Dict , a: Optional[Any] , a: Dict ):
        # Base-model forward passes: text+image, text-only and image-only,
        # asserting the last_hidden_state shape for each input combination.
        __lowerCamelCase : Optional[Any] = TFLayoutLMvaModel(config=a )
        # text + image
        __lowerCamelCase : Optional[Any] = model(a , pixel_values=a , training=a )
        __lowerCamelCase : int = model(
            a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , training=a , )
        __lowerCamelCase : List[Any] = model(a , bbox=a , pixel_values=a , training=a )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        __lowerCamelCase : List[Any] = model(a , training=a )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        __lowerCamelCase : Optional[Any] = model({'pixel_values': pixel_values} , training=a )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
    def _snake_case ( self: Dict , a: Dict , a: Optional[Any] , a: int , a: Optional[int] , a: List[str] , a: List[str] , a: List[str] ):
        # Sequence-classification head: logits must be (batch_size, num_labels).
        __lowerCamelCase : List[str] = self.num_labels
        __lowerCamelCase : str = TFLayoutLMvaForSequenceClassification(config=a )
        __lowerCamelCase : int = model(
            a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , labels=a , training=a , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def _snake_case ( self: Optional[int] , a: Union[str, Any] , a: Union[str, Any] , a: Dict , a: Optional[Any] , a: Tuple , a: Optional[Any] , a: List[Any] ):
        # Token-classification head: logits are per text token.
        __lowerCamelCase : Union[str, Any] = self.num_labels
        __lowerCamelCase : Any = TFLayoutLMvaForTokenClassification(config=a )
        __lowerCamelCase : Optional[Any] = model(
            a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , labels=a , training=a , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
    def _snake_case ( self: Dict , a: Optional[Any] , a: str , a: Dict , a: Union[str, Any] , a: List[Any] , a: Optional[int] , a: List[str] ):
        # Question-answering head: start/end logits span the full sequence.
        __lowerCamelCase : List[Any] = 2
        __lowerCamelCase : Any = TFLayoutLMvaForQuestionAnswering(config=a )
        __lowerCamelCase : Any = model(
            a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , start_positions=a , end_positions=a , training=a , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def _snake_case ( self: List[Any] ):
        # Repackages prepare_config_and_inputs() into the (config, inputs_dict)
        # pair expected by the common TF model tests.
        __lowerCamelCase : str = self.prepare_config_and_inputs()
        ((__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase)) : List[Any] = config_and_inputs
        __lowerCamelCase : Tuple = {
            'input_ids': input_ids,
            'bbox': bbox,
            'pixel_values': pixel_values,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_tf
class A_ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
    '''Common-suite test class for the TF LayoutLMv3 models.

    NOTE(review): this class is machine-mangled — the two mixin base classes
    were renamed to ``__UpperCamelCase`` (upstream: TFModelTesterMixin and
    PipelineTesterMixin), every class attribute is assigned to the same name
    ``__snake_case`` (so later assignments clobber earlier ones), several
    method signatures declare duplicate parameters named ``a`` (a SyntaxError
    as written), and many arguments/locals reference the undefined
    placeholder ``a``.  The structure is preserved byte-for-byte below;
    restore real names from the upstream test file before running.
    '''
    __snake_case = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    __snake_case = (
        {"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    __snake_case = False
    __snake_case = False
    __snake_case = False
    def _snake_case ( self: int , a: List[str] , a: Any , a: Optional[Any] , a: Tuple , a: Tuple ):
        # Pipeline-test filter hook: never skip.
        return True
    def _snake_case ( self: str , a: Any , a: Any , a: Optional[int]=False ):
        # Expands inputs for multiple-choice models and, when requested, adds
        # the label tensors appropriate for the model class under test.
        __lowerCamelCase : List[str] = copy.deepcopy(a )
        if model_class in get_values(a ):
            __lowerCamelCase : Tuple = {
                k: tf.tile(tf.expand_dims(a , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
                if isinstance(a , tf.Tensor ) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(a ):
                __lowerCamelCase : Any = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(a ):
                # QA models need both start and end positions.
                __lowerCamelCase : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
                __lowerCamelCase : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(a ):
                __lowerCamelCase : str = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(a ):
                __lowerCamelCase : Dict = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
        return inputs_dict
    def _snake_case ( self: Tuple ):
        # Test fixtures: model tester + config tester.
        __lowerCamelCase : int = TFLayoutLMvaModelTester(self )
        __lowerCamelCase : str = ConfigTester(self , config_class=a , hidden_size=37 )
    def _snake_case ( self: Union[str, Any] ):
        self.config_tester.run_common_tests()
    def _snake_case ( self: Union[str, Any] ):
        # Loss-computation test: for every model class with hf_compute_loss,
        # verify the loss shape when labels are passed as kwargs, as a dict,
        # with masked (-100) positions, and as a positional tuple.
        __lowerCamelCase , __lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowerCamelCase : int = model_class(a )
            if getattr(a , 'hf_compute_loss' , a ):
                # The number of elements in the loss should be the same as the number of elements in the label
                __lowerCamelCase : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                __lowerCamelCase : int = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=a )[0]
                ]
                __lowerCamelCase : Dict = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                __lowerCamelCase : Optional[int] = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                __lowerCamelCase : Dict = prepared_for_class.pop('input_ids' )
                __lowerCamelCase : str = model(a , **a )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss when we mask some positions
                __lowerCamelCase : List[Any] = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                __lowerCamelCase : List[str] = prepared_for_class.pop('input_ids' )
                if "labels" in prepared_for_class:
                    __lowerCamelCase : int = prepared_for_class['labels'].numpy()
                    if len(labels.shape ) > 1 and labels.shape[1] != 1:
                        # -100 is the ignore_index convention for TF/PT losses.
                        __lowerCamelCase : Tuple = -100
                        __lowerCamelCase : Tuple = tf.convert_to_tensor(a )
                        __lowerCamelCase : Tuple = model(a , **a )[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                        self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
                # Test that model correctly compute the loss with a dict
                __lowerCamelCase : int = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                __lowerCamelCase : str = model(a )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss with a tuple
                __lowerCamelCase : str = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                # Get keys that were added with the _prepare_for_class function
                __lowerCamelCase : Optional[Any] = prepared_for_class.keys() - inputs_dict.keys()
                __lowerCamelCase : List[Any] = inspect.signature(model.call ).parameters
                __lowerCamelCase : List[str] = list(signature.keys() )
                # Create a dictionary holding the location of the tensors in the tuple
                __lowerCamelCase : Optional[int] = {0: 'input_ids'}
                for label_key in label_keys:
                    __lowerCamelCase : Dict = signature_names.index(a )
                    __lowerCamelCase : str = label_key
                __lowerCamelCase : List[str] = sorted(tuple_index_mapping.items() )
                # Initialize a list with their default values, update the values and convert to a tuple
                __lowerCamelCase : Optional[int] = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default )
                for index, value in sorted_tuple_index_mapping:
                    __lowerCamelCase : Optional[int] = prepared_for_class[value]
                    __lowerCamelCase : Any = tuple(a )
                # Send to model
                __lowerCamelCase : int = model(tuple_input[:-1] )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
    def _snake_case ( self: List[str] ):
        # Base-model shape check.
        # NOTE(review): the 8-way unpack below binds every element to the same
        # mangled name and the call passes the undefined placeholder ``a``.
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(a , a , a , a , a , a )
    def _snake_case ( self: int ):
        # Repeats the base-model check for each position-embedding variant.
        # NOTE(review): the loop variable shadows the builtin ``type``.
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : str = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            __lowerCamelCase : Union[str, Any] = type
            self.model_tester.create_and_check_model(a , a , a , a , a , a )
    def _snake_case ( self: Dict ):
        # Sequence-classification head check.
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            a , a , a , a , a , a , a )
    def _snake_case ( self: str ):
        # Token-classification head check.
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            a , a , a , a , a , a , a )
    def _snake_case ( self: str ):
        # Question-answering head check.
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            a , a , a , a , a , a , a )
    @slow
    def _snake_case ( self: int ):
        # Smoke test: loading the first published checkpoint must succeed.
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowerCamelCase : Dict = TFLayoutLMvaModel.from_pretrained(a )
            self.assertIsNotNone(a )
def UpperCamelCase__ ( ):
    """Load the standard COCO cats fixture image used by the integration tests.

    Fix: the original bound the opened image to a mangled temporary name and
    then returned an undefined name ``image``; return the opened image
    directly instead.
    """
    return Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
@require_tf
class A_ ( unittest.TestCase ):
    '''Integration test: run pretrained TF LayoutLMv3-base on a fixture image
    and verify the output shape plus a slice of the last hidden state.

    NOTE(review): several call sites below pass the undefined placeholder
    ``a`` where concrete arguments belong (``apply_ocr=False``, the prepared
    tensors, ``training=False``), and locals such as ``model``/``outputs``
    were mangled into throwaway names — restore from the upstream test file
    before running.
    '''
    @cached_property
    def _snake_case ( self: Optional[int] ):
        # Image processor fixture; None when vision extras are missing.
        return LayoutLMvaImageProcessor(apply_ocr=a ) if is_vision_available() else None
    @slow
    def _snake_case ( self: Optional[Any] ):
        __lowerCamelCase : Tuple = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )
        __lowerCamelCase : Union[str, Any] = self.default_image_processor
        __lowerCamelCase : List[Any] = prepare_img()
        __lowerCamelCase : str = image_processor(images=a , return_tensors='tf' ).pixel_values
        __lowerCamelCase : Union[str, Any] = tf.constant([[1, 2]] )
        __lowerCamelCase : str = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
        # forward pass
        __lowerCamelCase : int = model(input_ids=a , bbox=a , pixel_values=a , training=a )
        # verify the logits
        __lowerCamelCase : Optional[int] = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape , a )
        __lowerCamelCase : Any = tf.constant(
            [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , a , atol=1e-4 ) )
| 669 | 0 |
'''Spanish national ID (DNI) validator.

A DNI is eight digits followed by a check letter; the correct letter is
``LOOKUP_LETTERS[number % 23]``.
'''
# Shared error message for malformed IDs (original module-level name kept).
A = 'Input must be a string of 8 numbers plus letter'
# Check-letter table indexed by (number % 23).
LOOKUP_LETTERS = 'TRWAGMYFPDXBNJZSQVHLCKE'


def UpperCAmelCase ( UpperCAmelCase__ ):
    """Return True when ``UpperCAmelCase__`` is a correctly check-lettered DNI.

    Accepts an optional dash before the letter (``"12345678-Z"``) and is
    case-insensitive.

    :param UpperCAmelCase__: candidate ID string, e.g. ``"12345678Z"``
    :raises TypeError: if the input is not a string
    :raises ValueError: if it is not eight digits followed by a letter

    Fixes over the original: both module constants were assigned to the same
    name ``A`` (so the ``LOOKUP_LETTERS`` referenced by the return line never
    existed), and the body read undefined ``SCREAMING_SNAKE_CASE__``
    placeholders instead of its parameter.
    """
    if not isinstance(UpperCAmelCase__ , str):
        raise TypeError(f'Expected string as input, found {type(UpperCAmelCase__).__name__}')
    spanish_id_clean = UpperCAmelCase__.replace('-' , '').upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(A)
    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(A) from ex
    if letter.isdigit():
        raise ValueError(A)
    return letter == LOOKUP_LETTERS[number % 23]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 320 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( __UpperCamelCase , unittest.TestCase ):
    '''CLIP tokenizer tests (slow + fast) over a tiny hand-built BPE vocab.

    NOTE(review): this class is machine-mangled — the mixin base class was
    renamed to ``__UpperCamelCase`` (upstream: TokenizerTesterMixin), every
    class attribute is assigned to the same name ``__snake_case`` (later
    assignments clobber earlier ones; upstream these are tokenizer_class,
    rust_tokenizer_class, test_rust_tokenizer, ...), and many locals are
    ``__lowerCamelCase`` placeholders whose intended names are visible from
    how they are consumed. Structure preserved byte-for-byte below.
    '''
    __snake_case = CLIPTokenizer
    __snake_case = CLIPTokenizerFast
    __snake_case = True
    __snake_case = {}
    __snake_case = False
    def _snake_case ( self: Union[str, Any] ):
        # Writes a miniature vocab + merges file pair into tmpdir so the
        # tokenizers can be instantiated offline.
        super().setUp()
        # fmt: off
        __lowerCamelCase : Any = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        __lowerCamelCase : Tuple = dict(zip(a , range(len(a ) ) ) )
        __lowerCamelCase : List[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
        __lowerCamelCase : Tuple = {'unk_token': '<unk>'}
        __lowerCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        __lowerCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(a ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(a ) )
    def _snake_case ( self: Tuple , **a: Union[str, Any] ):
        # Factory for the slow (Python) tokenizer over the tmpdir files.
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **a )
    def _snake_case ( self: Union[str, Any] , **a: List[str] ):
        # Factory for the fast (Rust) tokenizer over the tmpdir files.
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **a )
    def _snake_case ( self: Optional[int] , a: List[Any] ):
        # Input/expected-output pair used by the common tokenizer tests.
        __lowerCamelCase : Tuple = 'lower newer'
        __lowerCamelCase : Tuple = 'lower newer'
        return input_text, output_text
    def _snake_case ( self: List[str] ):
        # Full tokenization round-trip on the tiny vocab, including ids.
        __lowerCamelCase : List[Any] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        __lowerCamelCase : Optional[Any] = 'lower newer'
        __lowerCamelCase : int = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
        __lowerCamelCase : Optional[int] = tokenizer.tokenize(a )
        self.assertListEqual(a , a )
        __lowerCamelCase : int = tokens + [tokenizer.unk_token]
        __lowerCamelCase : int = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a )
    @require_ftfy
    def _snake_case ( self: Union[str, Any] ):
        # Slow (ftfy) and fast tokenizers must tokenize identically, including
        # tricky unicode: combining characters, space variants, line breaks.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                __lowerCamelCase : List[Any] = self.tokenizer_class.from_pretrained(a , **a )
                __lowerCamelCase : int = self.rust_tokenizer_class.from_pretrained(a , **a )
                __lowerCamelCase : str = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
                __lowerCamelCase : Optional[Any] = tokenizer_s.tokenize(a )
                __lowerCamelCase : Optional[Any] = tokenizer_r.tokenize(a )
                self.assertListEqual(a , a )
                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                __lowerCamelCase : List[Any] = 'xa\u0303y' + ' ' + 'x\xe3y'
                __lowerCamelCase : Tuple = tokenizer_s.tokenize(a )
                __lowerCamelCase : Any = tokenizer_r.tokenize(a )
                self.assertListEqual(a , a )
                # Test that the tokenization is identical on unicode of space type
                __lowerCamelCase : List[Any] = [
                    '\u0009', # (horizontal tab, '\t')
                    '\u000B', # (vertical tab)
                    '\u000C', # (form feed)
                    '\u0020', # (space, ' ')
                    '\u200E', # (left-to-right mark):w
                    '\u200F', # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    __lowerCamelCase : List[Any] = tokenizer_s.tokenize(a )
                    __lowerCamelCase : Optional[int] = tokenizer_r.tokenize(a )
                    self.assertListEqual(a , a )
                # Test that the tokenization is identical on unicode of line break type
                __lowerCamelCase : str = [
                    '\u000A', # (line feed, '\n')
                    '\r\n', # (carriage return and line feed, '\r\n')
                    '\u000D', # (carriage return, '\r')
                    '\r', # (carriage return, '\r')
                    '\u000D', # (carriage return, '\r')
                    '\u2028', # (line separator)
                    '\u2029', # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    __lowerCamelCase : Dict = tokenizer_s.tokenize(a )
                    __lowerCamelCase : List[str] = tokenizer_r.tokenize(a )
                    self.assertListEqual(a , a )
    def _snake_case ( self: List[Any] ):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                __lowerCamelCase : Optional[int] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
                __lowerCamelCase : Optional[int] = F'{text_of_1_token} {text_of_1_token}'
                __lowerCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(
                    a , use_fast=a , )
                __lowerCamelCase : Any = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(a ) + 1, len(a ) + 1 + len(a )) , )
                # Same check with a leading space prepended to the text.
                __lowerCamelCase : List[Any] = F' {text}'
                __lowerCamelCase : str = self.rust_tokenizer_class.from_pretrained(
                    a , use_fast=a , )
                __lowerCamelCase : Any = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(a )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(a ) + 1, 1 + len(a ) + 1 + len(a )) , )
    def _snake_case ( self: str ):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error in raised when the user try to load a previous version of the tokenizer.
        with self.assertRaises(a ) as context:
            self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
        self.assertTrue(
            context.exception.args[0].startswith(
                'The `backend_tokenizer` provided does not match the expected format.' ) )
    @require_ftfy
    def _snake_case ( self: Tuple ):
        super().test_tokenization_python_rust_equals()
    def _snake_case ( self: Tuple ):
        # CLIP always lower cases letters
        pass
| 669 | 0 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
# True when the optional s3fs package is importable.
a__ = importlib.util.find_spec('''s3fs''') is not None
# NOTE(review): the flag above was renamed by the mangler — the guard below
# reads ``_has_safs`` (upstream ``_has_s3fs``), which is never defined as
# written; reconcile the two names.
if _has_safs:
    from .safilesystem import SaFileSystem # noqa: F401
# Compression-backed filesystems shipped with the library.
a__ = [
    compression.BzaFileSystem,
    compression.GzipFileSystem,
    compression.LzaFileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
# NOTE(review): ``COMPRESSION_FILESYSTEMS`` is the upstream name of the list
# assigned to ``a__`` just above — undefined as written.
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def A__ (snake_case ):
    """Strip the protocol prefix from a URI-style dataset path.

    e.g. ``"s3://bucket/dir"`` -> ``"bucket/dir"``; a plain local path is
    returned unchanged.

    :param snake_case: dataset path, possibly with a ``protocol://`` prefix
    :return: the path without its protocol prefix

    Fixes over the original: the body read an undefined name ``dataset_path``
    instead of its parameter, and the ``Tuple`` annotation referenced a name
    not imported in this module.
    """
    dataset_path = snake_case
    if "://" in dataset_path:
        dataset_path = dataset_path.split("""://""" )[1]
    return dataset_path
def A__ (snake_case ):
    """Return True when ``snake_case`` is a non-local (remote) filesystem.

    A filesystem counts as remote when it is not ``None`` and its ``protocol``
    attribute is anything other than ``"file"``.

    :param snake_case: an fsspec-style filesystem object, or None
    :return: True for a remote filesystem, False for None or a local one

    Fix over the original: the body tested an undefined name ``fs`` instead
    of its parameter (the mangled annotations are dropped as well).
    """
    fs = snake_case
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
def A__ (fs , src , dst ):
    """Move ``src`` to ``dst`` on the given (possibly remote) filesystem.

    For a local filesystem the protocol-stripped paths are moved with
    ``shutil.move``; otherwise the filesystem's own recursive ``mv`` is used.

    :param fs: an fsspec-style filesystem object, or None for local
    :param src: source path
    :param dst: destination path

    Fixes over the original: it declared three parameters with the same name
    (a SyntaxError) and routed everything through undefined placeholder
    names; the remote-filesystem test is inlined so the function is
    self-contained, and ``recursive=True`` restores the upstream datasets
    behaviour (the original's recursive argument was a mangled placeholder).
    """
    is_local = not (fs is not None and fs.protocol != "file")
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src ) , fs._strip_protocol(dst ) )
    else:
        fs.mv(src , dst , recursive=True )
def A__ () -> None:
    """Reset fsspec's asyncio machinery (e.g. after a fork) so a fresh event
    loop, IO thread and lock are created on next use.

    NOTE(review): the else-branch assignments were obfuscated into undefined
    locals; restored to the ``fsspec.asyn`` attributes per upstream `datasets`.
    """
    if hasattr(fsspec.asyn , """reset_lock""" ):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        # Older fsspec: clear the cached IO thread / event loop and rebuild the
        # lock by hand.
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
| 279 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
# Detect whether we are running inside Google Colab: Colab terminals cannot
# handle the raw arrow-key input used by the bullet menu, so the menu falls
# back to typed numeric input.  (Restored name: the menu code reads `in_colab`.)
in_colab = False
try:
    in_colab = _is_package_available('google.colab')
except ModuleNotFoundError:
    pass
@input.register
class A_ :
    """
    An interactive terminal menu: renders a list of choices and lets the user
    pick one with the arrow keys, the digit keys, or (on Colab) a typed index.

    Restored from the obfuscated original: the methods were all named
    ``_snake_case`` (shadowing each other) while the bodies called
    ``self.write_choice`` / ``self.print_choice`` / ``self.move_direction``,
    and ``__init__`` declared two parameters with the same name.
    """

    def __init__( self , prompt: str = None , choices: list = None ):
        # Index of the currently highlighted choice.
        self.position = 0
        # Default is None rather than a mutable [] so instances never share state.
        self.choices = [] if choices is None else choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = '*'
        else:
            self.arrow_char = '➔ '

    def write_choice( self , index: int , end: str = "" ):
        """Write the text of choice ``index`` (green where color is supported)."""
        if sys.platform != "win32":
            writeColor(self.choices[index] , 32 , end )
        else:
            forceWrite(self.choices[index] , end )

    def print_choice( self , index: int ):
        """Print one menu row, prefixing the arrow when ``index`` is highlighted."""
        if index == self.position:
            forceWrite(F' {self.arrow_char} ' )
            self.write_choice(index )
        else:
            forceWrite(F'    {self.choices[index]}' )
        reset_cursor()

    def move_direction( self , direction: Direction , num_spaces: int = 1 ):
        """Move the highlight up/down by ``num_spaces`` rows, redrawing both rows."""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices ):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position )
        move_cursor(num_spaces , direction.name )
        self.print_choice(self.position )

    @input.mark(KEYMAP['up'] )
    def move_up( self ):
        self.move_direction(Direction.UP )

    @input.mark(KEYMAP['down'] )
    def move_down( self ):
        self.move_direction(Direction.DOWN )

    @input.mark(KEYMAP['newline'] )
    def select( self ):
        # Jump the cursor below the menu and report the chosen index.
        move_cursor(len(self.choices ) - self.position , 'DOWN' )
        return self.position

    @input.mark(KEYMAP['interrupt'] )
    def interrupt( self ):
        move_cursor(len(self.choices ) - self.position , 'DOWN' )
        raise KeyboardInterrupt

    # Original read KEYMAP[str(a)] with `a` undefined at class scope; the loop
    # variable `number` is the intended key.
    @input.mark_multiple(*[KEYMAP[str(number )] for number in range(10 )] )
    def select_row( self ):
        """Jump directly to the row matching the pressed digit key."""
        index = int(chr(self.current_selection ) )
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices ):
            if self.position > index:
                self.move_direction(Direction.UP , -movement )
            elif self.position < index:
                self.move_direction(Direction.DOWN , movement )
            else:
                return
        else:
            return

    def run( self , default_choice: int = 0 ):
        """Render the menu, block until a choice is made, and return its index."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt , '\n' )
            if in_colab:
                forceWrite('Please input a choice index (starting from 0), and press enter' , '\n' )
            else:
                forceWrite('Please select a choice using the arrow or number keys, and selecting with enter' , '\n' )
        self.position = default_choice
        for i in range(len(self.choices ) ):
            self.print_choice(i )
            forceWrite('\n' )
        move_cursor(len(self.choices ) - self.position , 'UP' )
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input() )
                    except ValueError:
                        choice = default_choice
                else:
                    # handle_input is provided by the @input.register machinery.
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices ) + 1 ):
                        move_cursor(1 , 'UP' )
                        clear_line()
                    self.write_choice(choice , '\n' )
                    return choice
| 669 | 0 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()

logger = logging.get_logger(__name__)

# Map every slow tokenizer name to its fast counterpart class,
# e.g. "BertTokenizer" -> BertTokenizerFast.  (Restored names: the converter
# function below reads `logger` and `TOKENIZER_CLASSES`.)
TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast( tokenizer_name , checkpoint_name , dump_path , force_download ) -> None:
    """Download slow-tokenizer checkpoints and save them back as fast (tokenizer.json) files.

    Args:
        tokenizer_name: name of a tokenizer class in ``TOKENIZER_CLASSES``; ``None`` converts all of them.
        checkpoint_name: a single checkpoint to convert; ``None`` converts every known checkpoint.
        dump_path: directory where the fast tokenizer files are written.
        force_download: re-download checkpoints even when cached.

    Restored from the obfuscated original, whose four parameters shared one name
    (a SyntaxError) and whose body referenced undefined placeholders.  The name
    matches the call in the ``__main__`` block below.
    """
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(F'''Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.''' )
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers , tokenizer_name + '''Fast''' )}
    logger.info(F'''Loading tokenizer classes: {tokenizer_names}''' )

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            # Convert every canonical checkpoint known to this tokenizer class.
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys() )
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(F'''For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}''' )

        for checkpoint in checkpoint_names:
            logger.info(F'''Loading {tokenizer_class.__class__.__name__} {checkpoint}''' )

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint , force_download=force_download )

            # Save fast tokenizer
            logger.info(F'''Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}''' )

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory , checkpoint_prefix_name = checkpoint.split('''/''' )
                dump_path_full = os.path.join(dump_path , checkpoint_directory )
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )

            # When the checkpoint name is embedded in the hosted vocab file path,
            # mirror that directory layout locally instead of using a file prefix.
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
                next_char = file_path.split(checkpoint )[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full , checkpoint_prefix_name )
                    checkpoint_prefix_name = None
                logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )

            file_names = tokenizer.save_pretrained(
                dump_path_full , legacy_format=False , filename_prefix=checkpoint_prefix_name )
            logger.info(F'''=> File names {file_names}''' )

            # Keep only the fast tokenizer.json artifacts.
            for file_name in file_names:
                if not file_name.endswith('''tokenizer.json''' ):
                    os.remove(file_name )
                    logger.info(F'''=> removing {file_name}''' )
if __name__ == "__main__":
    # Restored names: the original assigned the parser and parsed args to a
    # throwaway placeholder while reading `parser` / `args` below.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help=(
            f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
            "download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--checkpoint_name",
        default=None,
        type=str,
        help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
    )
    parser.add_argument(
        "--force_download",
        action="store_true",
        help="Re-download checkpoints.",
    )
    args = parser.parse_args()

    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( __UpperCamelCase ):
    """Unit tests for ``CMStochasticIterativeScheduler`` (consistency models).

    Restored from the obfuscated original: both class attributes shared the name
    ``__snake_case`` (the body reads ``self.scheduler_classes``), every method
    was named ``_snake_case`` (the body calls ``self.get_scheduler_config``),
    and locals were assigned to placeholders while being read under real names.
    """

    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config( self , **kwargs ):
        """Default scheduler config for these tests; keyword overrides are merged in."""
        config = {
            'num_train_timesteps': 201,
            'sigma_min': 0.002,
            'sigma_max': 80.0,
        }

        config.update(**kwargs )
        return config

    def test_multistep( self ):
        """Two consecutive steps must preserve the sample shape."""
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config )

        scheduler.set_timesteps(num_inference_steps )

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual , timestep_0 , sample ).prev_sample
        output_1 = scheduler.step(residual , timestep_1 , sample ).prev_sample

        self.assertEqual(output_0.shape , sample.shape )
        self.assertEqual(output_0.shape , output_1.shape )

    def test_timesteps( self ):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def test_clip_denoised( self ):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised )

    def test_full_loop_no_noise( self ):
        """Single-step sampling loop must reproduce known sum/mean statistics."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps )
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0 )

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps ):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample , t )

            # 2. predict noise residual
            residual = model(scaled_sample , t )

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )

        assert abs(result_sum.item() - 192.7614 ) < 1e-2
        assert abs(result_mean.item() - 0.2510 ) < 1e-3

    def test_full_loop_with_timesteps( self ):
        """Sampling with an explicit custom timestep schedule."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps )
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0 )

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample , t )

            # 2. predict noise residual
            residual = model(scaled_sample , t )

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )

        assert abs(result_sum.item() - 347.6357 ) < 1e-2
        assert abs(result_mean.item() - 0.4527 ) < 1e-3

    def test_custom_timesteps_increasing_order( self ):
        # Non-descending schedules must be rejected.
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        timesteps = [39, 30, 12, 15, 0]

        with self.assertRaises(ValueError , msg='`timesteps` must be in descending order.' ):
            scheduler.set_timesteps(timesteps=timesteps )

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps )

        with self.assertRaises(ValueError , msg='Can only pass one of `num_inference_steps` or `timesteps`.' ):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )

    def test_custom_timesteps_too_large( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        timesteps = [scheduler.config.num_train_timesteps]

        # NOTE(review): the msg below is not an f-string upstream either; kept verbatim.
        with self.assertRaises(
            ValueError , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ):
            scheduler.set_timesteps(timesteps=timesteps )
| 669 | 0 |
"""simple docstring"""
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# SentencePiece fixture used to instantiate the test tokenizers below.
# NOTE(review): the test classes reference this module-level constant as
# `snake_case` (e.g. `PegasusTokenizer(snake_case)` in setUp), so that name
# is load-bearing; the original assigned it to an unused placeholder.
snake_case = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class _snake_case ( __UpperCamelCase , unittest.TestCase ):
    """Tokenizer tests for Pegasus (slow + fast), built from a SentencePiece fixture.

    Restored from the obfuscated original: the four class attributes shared one
    name while the bodies read ``self.tokenizer_class`` /
    ``self.rust_tokenizer_class``, and the methods (all ``snake_case_``) are
    referenced as ``self._large_tokenizer`` / ``self.get_tokenizer``.
    """

    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp( self ):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(snake_case )
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def _large_tokenizer( self ):
        return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )

    def get_tokenizer( self , **kwargs ):
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts( self , tokenizer ):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id( self ):
        token = '</s>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )

    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , '''<pad>''' )
        self.assertEqual(vocab_keys[1] , '''</s>''' )
        self.assertEqual(vocab_keys[-1] , '''v''' )
        self.assertEqual(len(vocab_keys ) , 1_103 )

    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1_103 )

    def test_mask_tokens_rust_pegasus( self ):
        # Slow and fast tokenizers must agree on special-token handling.
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        self.assertListEqual(py_ids , rust_ids )

    def test_large_mask_tokens( self ):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
        desired_result = [2, 413, 615, 114, 3, 1_971, 113, 1_679, 10_710, 107, 1]
        ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
        self.assertListEqual(desired_result , ids )

    def test_large_tokenizer_settings( self ):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96_103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1_024
        raw_input_str = 'To ensure a smooth flow of bank resolutions.'
        desired_result = [413, 615, 114, 2_291, 1_971, 113, 1_679, 10_710, 107, 1]
        ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
        self.assertListEqual(desired_result , ids )
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation( self ):
        src_texts = ['This is going to be way too long.' * 150, 'short example']
        tgt_texts = ['not super long but more than 5 tokens', 'tiny']
        batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors='''pt''' )
        targets = self._large_tokenizer(
            text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors='''pt''' )

        assert batch.input_ids.shape == (2, 1_024)
        assert batch.attention_mask.shape == (2, 1_024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2  # input_ids, attention_mask.

    @slow
    def test_tokenizer_integration( self ):
        # fmt: off
        expected_encoding = {'input_ids': [[38_979, 143, 18_485, 606, 130, 26_669, 87_686, 121, 54_189, 1_129, 111, 26_669, 87_686, 121, 9_114, 14_787, 121, 13_249, 158, 592, 956, 121, 14_621, 31_576, 143, 62_613, 108, 9_688, 930, 43_430, 11_562, 62_613, 304, 108, 11_443, 897, 108, 9_314, 17_415, 63_399, 108, 11_443, 7_614, 18_316, 118, 4_284, 7_148, 12_430, 143, 1_400, 25_703, 158, 111, 4_284, 7_148, 11_772, 143, 21_297, 1_064, 158, 122, 204, 3_506, 1_754, 1_133, 14_787, 1_581, 115, 33_224, 4_482, 111, 1_355, 110, 29_173, 317, 50_833, 108, 20_147, 94_665, 111, 77_198, 107, 1], [110, 62_613, 117, 638, 112, 1_133, 121, 20_098, 1_355, 79_050, 13_872, 135, 1_596, 53_541, 1_352, 141, 13_039, 5_542, 124, 302, 518, 111, 268, 2_956, 115, 149, 4_427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1_235, 2_799, 18_289, 17_780, 204, 109, 9_474, 1_296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class _snake_case ( __UpperCamelCase , unittest.TestCase ):
    """Tokenizer tests for the BigBird-Pegasus variant (offset 0, ``[MASK]`` token).

    Restored from the obfuscated original in the same way as the class above:
    distinct class-attribute names, canonical method names, and locals renamed
    to match their read sites.
    """

    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp( self ):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(snake_case , offset=0 , mask_token_sent=None , mask_token='''[MASK]''' )
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def _large_tokenizer( self ):
        return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )

    def get_tokenizer( self , **kwargs ):
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts( self , tokenizer ):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus( self ):
        # Slow and fast tokenizers must agree on special-token handling.
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        self.assertListEqual(py_ids , rust_ids )

    @require_torch
    def test_large_seq2seq_truncation( self ):
        src_texts = ['This is going to be way too long.' * 1_000, 'short example']
        tgt_texts = ['not super long but more than 5 tokens', 'tiny']
        batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors='''pt''' )
        targets = self._large_tokenizer(
            text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors='''pt''' )

        assert batch.input_ids.shape == (2, 4_096)
        assert batch.attention_mask.shape == (2, 4_096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer( self ):
        test_str = (
            'This is an example string that is used to test the original TF implementation against the HF'
            ' implementation'
        )
        token_ids = self._large_tokenizer(test_str ).input_ids
        self.assertListEqual(
            token_ids , [182, 117, 142, 587, 4_211, 120, 117, 263, 112, 804, 109, 856, 25_016, 3_137, 464, 109, 26_955, 3_137, 1] , )
| 608 |
from datetime import datetime

import requests
# Fix: the HTML parser lives in the `bs4` package (the original imported the
# nonexistent module `bsa`).
from bs4 import BeautifulSoup

if __name__ == "__main__":
    # Restored names: every local was assigned to the same placeholder while
    # being read as `url` / `image_url` / `image_data` / `file_name` below.
    url = input('Enter image url: ').strip()
    print(f"""Downloading image from {url} ...""")
    soup = BeautifulSoup(requests.get(url).content, 'html.parser')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('meta', {'property': 'og:image'})['content']
    image_data = requests.get(image_url).content
    # Timestamped filename so repeated downloads never overwrite each other.
    file_name = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
    with open(file_name, 'wb') as fp:
        fp.write(image_data)
    print(f"""Done. Image saved to disk as {file_name}.""")
| 669 | 0 |
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def lowerCamelCase ( _snake_case : List[str] ,_snake_case : str ,_snake_case : Optional[Any] ):
'''simple docstring'''
lowercase__ = 0
if start < end:
lowercase__ = randint(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
lowercase__ = a[end]
lowercase__ = a[pivot]
lowercase__ = temp
lowercase__ = _in_place_partition(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
count += _in_place_quick_sort(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,p - 1 )
count += _in_place_quick_sort(SCREAMING_SNAKE_CASE__ ,p + 1 ,SCREAMING_SNAKE_CASE__ )
return count
def lowerCamelCase ( _snake_case : List[str] ,_snake_case : Union[str, Any] ,_snake_case : Tuple ):
'''simple docstring'''
lowercase__ = 0
lowercase__ = randint(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
lowercase__ = a[end]
lowercase__ = a[pivot]
lowercase__ = temp
lowercase__ = start - 1
for index in range(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
lowercase__ = new_pivot_index + 1
lowercase__ = a[new_pivot_index]
lowercase__ = a[index]
lowercase__ = temp
lowercase__ = a[new_pivot_index + 1]
lowercase__ = a[end]
lowercase__ = temp
return new_pivot_index + 1, count
# Demo driver: draw 100 normal samples, round-trip them through a temp file,
# then count comparisons while quicksorting.  Restored names: the original
# bound every module-level value to one placeholder while reading
# `p` / `X` / `M` / `r` below.
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    "is :"
)
print(z)
| 267 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
# Repository hygiene checks over every tracked file path.  Restored names: the
# original rebound one placeholder for every list while the `if`/`print` lines
# read `filepaths`, `upper_files`, `space_files`, `hyphen_files`, `nodir_files`.
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

# Paths containing uppercase characters.
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"""{len(upper_files)} files contain uppercase characters:""")
    print('\n'.join(upper_files) + '\n')

# Paths containing spaces.
space_files = [file for file in filepaths if ' ' in file]
if space_files:
    print(f"""{len(space_files)} files contain space characters:""")
    print('\n'.join(space_files) + '\n')

# Paths containing hyphens.
hyphen_files = [file for file in filepaths if '-' in file]
if hyphen_files:
    print(f"""{len(hyphen_files)} files contain hyphen characters:""")
    print('\n'.join(hyphen_files) + '\n')

# Paths sitting at the repository root (no directory separator).
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"""{len(nodir_files)} files are not in a directory:""")
    print('\n'.join(nodir_files) + '\n')

# Exit non-zero (with the offending count) when any check failed.
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 669 | 0 |
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : int , _lowerCamelCase : Dict , _lowerCamelCase : str , _lowerCamelCase : List[str]) -> Any:
'''simple docstring'''
if height >= 1:
move_tower(height - 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
move_disk(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
move_tower(height - 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : int , _lowerCamelCase : Any) -> List[str]:
'''simple docstring'''
print("moving disk from" , SCREAMING_SNAKE_CASE__ , "to" , SCREAMING_SNAKE_CASE__)
def _SCREAMING_SNAKE_CASE ( ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase : Optional[int] = int(input("Height of hanoi: ").strip())
move_tower(SCREAMING_SNAKE_CASE__ , "A" , "B" , "C")
if __name__ == "__main__":
main() | 557 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)

# Canonical checkpoint name -> hosted config URL for the XLM-RoBERTa family.
# NOTE(review): both module-level assignments share the name `lowercase_`, so
# the logger above is immediately shadowed by this map — confirm the intended
# distinct names (presumably `logger` and the archive map constant).
lowercase_ = {
    'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
    'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
    'xlm-roberta-large-finetuned-conll02-dutch': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
    ),
    'xlm-roberta-large-finetuned-conll02-spanish': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
    ),
    'xlm-roberta-large-finetuned-conll03-english': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
    ),
    'xlm-roberta-large-finetuned-conll03-german': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
    ),
}
class A_ ( __UpperCamelCase ):
    """Configuration for an XLM-RoBERTa model (vocabulary and architecture
    hyper-parameters).

    Restored from the obfuscated original, whose ``__init__`` declared every
    parameter as ``a`` (a duplicate-argument SyntaxError) while the body read
    the real hyper-parameter names.
    """

    # NOTE(review): presumably obfuscated from `model_type`; kept as-is to
    # avoid changing the class interface — confirm against upstream.
    __snake_case = """xlm-roberta"""

    def __init__(
        self ,
        vocab_size=3_0522 ,
        hidden_size=768 ,
        num_hidden_layers=12 ,
        num_attention_heads=12 ,
        intermediate_size=3072 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.1 ,
        attention_probs_dropout_prob=0.1 ,
        max_position_embeddings=512 ,
        type_vocab_size=2 ,
        initializer_range=0.02 ,
        layer_norm_eps=1e-12 ,
        pad_token_id=1 ,
        bos_token_id=0 ,
        eos_token_id=2 ,
        position_embedding_type="absolute" ,
        use_cache=True ,
        classifier_dropout=None ,
        **kwargs ,
    ):
        # Forward the special-token ids (and any extra kwargs) to the base config.
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class A_ ( __UpperCamelCase ):
    """ONNX export configuration: declares the dynamic axes of the model inputs."""

    @property
    def _snake_case ( self ):
        """Map each input name to its dynamic axes.

        Multiple-choice tasks carry an extra per-choice axis.  Fix: the axis
        dict was assigned to a throwaway placeholder while the return statement
        read the undefined name ``dynamic_axis``.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
| 669 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Restored name: the config class below calls `logger.info(...)`.
logger = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL for Deformable DETR.
DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class _UpperCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
a_ = """deformable_detr"""
a_ = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : Tuple , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : List[str]=3 , lowerCAmelCase_ : Dict=3_0_0 , lowerCAmelCase_ : str=1_0_2_4 , lowerCAmelCase_ : Union[str, Any]=6 , lowerCAmelCase_ : List[Any]=1_0_2_4 , lowerCAmelCase_ : Optional[int]=8 , lowerCAmelCase_ : List[str]=6 , lowerCAmelCase_ : Dict=1_0_2_4 , lowerCAmelCase_ : Dict=8 , lowerCAmelCase_ : Optional[int]=0.0 , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : List[Any]="relu" , lowerCAmelCase_ : Any=2_5_6 , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : int=0.0 , lowerCAmelCase_ : Any=0.0 , lowerCAmelCase_ : List[Any]=0.02 , lowerCAmelCase_ : Optional[Any]=1.0 , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Optional[int]=False , lowerCAmelCase_ : Tuple="sine" , lowerCAmelCase_ : int="resnet50" , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Union[str, Any]=4 , lowerCAmelCase_ : Any=4 , lowerCAmelCase_ : List[str]=4 , lowerCAmelCase_ : Tuple=False , lowerCAmelCase_ : Optional[Any]=3_0_0 , lowerCAmelCase_ : Optional[int]=False , lowerCAmelCase_ : Dict=1 , lowerCAmelCase_ : Optional[int]=5 , lowerCAmelCase_ : int=2 , lowerCAmelCase_ : Union[str, Any]=1 , lowerCAmelCase_ : Dict=1 , lowerCAmelCase_ : Optional[int]=5 , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : Union[str, Any]=0.25 , lowerCAmelCase_ : Dict=False , **lowerCAmelCase_ : List[Any] , ) -> List[str]:
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
__lowerCAmelCase = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
__lowerCAmelCase = backbone_config.get('model_type' )
__lowerCAmelCase = CONFIG_MAPPING[backbone_model_type]
__lowerCAmelCase = config_class.from_dict(lowerCAmelCase_ )
__lowerCAmelCase = use_timm_backbone
__lowerCAmelCase = backbone_config
__lowerCAmelCase = num_channels
__lowerCAmelCase = num_queries
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = d_model
__lowerCAmelCase = encoder_ffn_dim
__lowerCAmelCase = encoder_layers
__lowerCAmelCase = encoder_attention_heads
__lowerCAmelCase = decoder_ffn_dim
__lowerCAmelCase = decoder_layers
__lowerCAmelCase = decoder_attention_heads
__lowerCAmelCase = dropout
__lowerCAmelCase = attention_dropout
__lowerCAmelCase = activation_dropout
__lowerCAmelCase = activation_function
__lowerCAmelCase = init_std
__lowerCAmelCase = init_xavier_std
__lowerCAmelCase = encoder_layerdrop
__lowerCAmelCase = auxiliary_loss
__lowerCAmelCase = position_embedding_type
__lowerCAmelCase = backbone
__lowerCAmelCase = use_pretrained_backbone
__lowerCAmelCase = dilation
# deformable attributes
__lowerCAmelCase = num_feature_levels
__lowerCAmelCase = encoder_n_points
__lowerCAmelCase = decoder_n_points
__lowerCAmelCase = two_stage
__lowerCAmelCase = two_stage_num_proposals
__lowerCAmelCase = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.' )
# Hungarian matcher
__lowerCAmelCase = class_cost
__lowerCAmelCase = bbox_cost
__lowerCAmelCase = giou_cost
# Loss coefficients
__lowerCAmelCase = mask_loss_coefficient
__lowerCAmelCase = dice_loss_coefficient
__lowerCAmelCase = bbox_loss_coefficient
__lowerCAmelCase = giou_loss_coefficient
__lowerCAmelCase = eos_coefficient
__lowerCAmelCase = focal_alpha
__lowerCAmelCase = disable_custom_kernels
super().__init__(is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ )
    @property
    def lowercase ( self : Union[str, Any] ) -> int:
        # Alias exposing the encoder attention-head count.
        # NOTE(review): three sibling methods in this class are all named
        # ``lowercase`` — later definitions shadow this one; the mangled names
        # need restoring before the property is reachable.
        return self.encoder_attention_heads
    @property
    def lowercase ( self : str ) -> int:
        # Alias exposing the model hidden size (d_model).
        # NOTE(review): shadowed by the later ``lowercase`` definition below.
        return self.d_model
def lowercase ( self : Optional[int] ) -> Optional[Any]:
__lowerCAmelCase = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
__lowerCAmelCase = self.backbone_config.to_dict()
__lowerCAmelCase = self.__class__.model_type
return output
| 53 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A_ ( __UpperCamelCase , unittest.TestCase ):
    """Fast (CPU) tests for ConsistencyModelPipeline.

    NOTE(review): this class appears machine-mangled — the four
    ``__snake_case`` attributes reassign one name (only the last survives),
    every method is named ``_snake_case`` (later defs shadow earlier ones),
    method bodies bind results to ``__lowerCamelCase`` and then read
    never-defined names, and the inputs builder declares the parameter ``a``
    twice (a SyntaxError). The upstream names must be restored to run this.
    """
    __snake_case = ConsistencyModelPipeline
    __snake_case = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    __snake_case = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    __snake_case = frozenset(
        [
            """num_inference_steps""",
            """generator""",
            """latents""",
            """output_type""",
            """return_dict""",
            """callback""",
            """callback_steps""",
        ] )
    @property
    def _snake_case ( self: str ):
        # Unconditional test UNet fixture from the hub.
        __lowerCamelCase : Tuple = UNetaDModel.from_pretrained(
            'diffusers/consistency-models-test' , subfolder='test_unet' , )
        return unet
    @property
    def _snake_case ( self: Tuple ):
        # Class-conditional test UNet fixture.
        __lowerCamelCase : List[str] = UNetaDModel.from_pretrained(
            'diffusers/consistency-models-test' , subfolder='test_unet_class_cond' , )
        return unet
    def _snake_case ( self: int , a: str=False ):
        # Builds {unet, scheduler} components; flag selects the cond. UNet.
        if class_cond:
            __lowerCamelCase : str = self.dummy_cond_unet
        else:
            __lowerCamelCase : str = self.dummy_uncond_unet
        # Default to CM multistep sampler
        __lowerCamelCase : Tuple = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCamelCase : Union[str, Any] = {
            'unet': unet,
            'scheduler': scheduler,
        }
        return components
    def _snake_case ( self: int , a: List[str] , a: Any=0 ):
        # NOTE(review): duplicate parameter ``a`` — SyntaxError as written.
        if str(a ).startswith('mps' ):
            __lowerCamelCase : List[Any] = torch.manual_seed(a )
        else:
            __lowerCamelCase : Tuple = torch.Generator(device=a ).manual_seed(a )
        __lowerCamelCase : Optional[Any] = {
            'batch_size': 1,
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'generator': generator,
            'output_type': 'np',
        }
        return inputs
    def _snake_case ( self: Optional[Any] ):
        # Multistep sampling, unconditional; checks a 3x3 output slice.
        __lowerCamelCase : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
        __lowerCamelCase : Dict = self.get_dummy_components()
        __lowerCamelCase : str = ConsistencyModelPipeline(**a )
        __lowerCamelCase : str = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : Any = self.get_dummy_inputs(a )
        __lowerCamelCase : Optional[int] = pipe(**a ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1]
        __lowerCamelCase : str = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def _snake_case ( self: Optional[int] ):
        # Multistep sampling, class-conditional (class label 0).
        __lowerCamelCase : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
        __lowerCamelCase : Dict = self.get_dummy_components(class_cond=a )
        __lowerCamelCase : Optional[int] = ConsistencyModelPipeline(**a )
        __lowerCamelCase : Any = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : Union[str, Any] = self.get_dummy_inputs(a )
        __lowerCamelCase : Tuple = 0
        __lowerCamelCase : List[str] = pipe(**a ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCamelCase : Dict = image[0, -3:, -3:, -1]
        __lowerCamelCase : Any = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def _snake_case ( self: Optional[int] ):
        # Single-step sampling, unconditional.
        __lowerCamelCase : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
        __lowerCamelCase : Optional[int] = self.get_dummy_components()
        __lowerCamelCase : Tuple = ConsistencyModelPipeline(**a )
        __lowerCamelCase : Union[str, Any] = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : Tuple = self.get_dummy_inputs(a )
        __lowerCamelCase : str = 1
        __lowerCamelCase : Optional[int] = None
        __lowerCamelCase : Any = pipe(**a ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCamelCase : int = image[0, -3:, -3:, -1]
        __lowerCamelCase : Optional[int] = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def _snake_case ( self: List[str] ):
        # Single-step sampling, class-conditional.
        __lowerCamelCase : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
        __lowerCamelCase : List[Any] = self.get_dummy_components(class_cond=a )
        __lowerCamelCase : Optional[Any] = ConsistencyModelPipeline(**a )
        __lowerCamelCase : List[Any] = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : List[str] = self.get_dummy_inputs(a )
        __lowerCamelCase : List[str] = 1
        __lowerCamelCase : List[str] = None
        __lowerCamelCase : str = 0
        __lowerCamelCase : Tuple = pipe(**a ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCamelCase : int = image[0, -3:, -3:, -1]
        __lowerCamelCase : Any = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
    """Slow GPU integration tests for ConsistencyModelPipeline.

    NOTE(review): machine-mangled — every method is named ``_snake_case``
    (later defs shadow earlier ones; even the teardown will never run under
    unittest), the two input builders declare the parameter ``a`` multiple
    times (a SyntaxError), and bodies read names never assigned.
    ``torch.floataa`` is presumably a mangled ``torch.float16`` — confirm.
    """
    def _snake_case ( self: Any ):
        # Teardown: free Python and CUDA memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def _snake_case ( self: Optional[int] , a: str=0 , a: Tuple=False , a: Tuple="cpu" , a: List[str]=torch.floataa , a: Optional[Any]=(1, 3, 64, 64) ):
        # NOTE(review): duplicate parameters ``a`` — SyntaxError as written.
        __lowerCamelCase : Optional[Any] = torch.manual_seed(a )
        __lowerCamelCase : Optional[int] = {
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'class_labels': 0,
            'generator': generator,
            'output_type': 'np',
        }
        if get_fixed_latents:
            __lowerCamelCase : Dict = self.get_fixed_latents(seed=a , device=a , dtype=a , shape=a )
            __lowerCamelCase : Optional[Any] = latents
        return inputs
    def _snake_case ( self: Any , a: Any=0 , a: List[str]="cpu" , a: Optional[Any]=torch.floataa , a: int=(1, 3, 64, 64) ):
        # Deterministic latents for a given seed/device/dtype/shape.
        if type(a ) == str:
            __lowerCamelCase : Dict = torch.device(a )
        __lowerCamelCase : Union[str, Any] = torch.Generator(device=a ).manual_seed(a )
        __lowerCamelCase : str = randn_tensor(a , generator=a , device=a , dtype=a )
        return latents
    def _snake_case ( self: str ):
        # Multistep sampling against the released ImageNet-64 model.
        __lowerCamelCase : Optional[int] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        __lowerCamelCase : Union[str, Any] = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCamelCase : str = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : int = self.get_inputs()
        __lowerCamelCase : Optional[int] = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
        __lowerCamelCase : Any = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    def _snake_case ( self: Optional[int] ):
        # Single-step sampling.
        __lowerCamelCase : int = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        __lowerCamelCase : int = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCamelCase : Dict = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : List[str] = self.get_inputs()
        __lowerCamelCase : Optional[Any] = 1
        __lowerCamelCase : Dict = None
        __lowerCamelCase : Union[str, Any] = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
        __lowerCamelCase : Tuple = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    @require_torch_a
    def _snake_case ( self: List[str] ):
        # Multistep sampling under torch 2.0 SDPA flash attention.
        __lowerCamelCase : Optional[Any] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        __lowerCamelCase : Any = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCamelCase : List[Any] = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : int = self.get_inputs(get_fixed_latents=a , device=a )
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ):
            __lowerCamelCase : int = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
        __lowerCamelCase : Optional[Any] = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    @require_torch_a
    def _snake_case ( self: Dict ):
        # Single-step sampling under SDPA flash attention.
        __lowerCamelCase : Dict = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        __lowerCamelCase : Any = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCamelCase : str = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : str = self.get_inputs(get_fixed_latents=a , device=a )
        __lowerCamelCase : str = 1
        __lowerCamelCase : Union[str, Any] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ):
            __lowerCamelCase : Optional[int] = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCamelCase : str = image[0, -3:, -3:, -1]
        __lowerCamelCase : str = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 669 | 0 |
'''simple docstring'''
def UpperCAmelCase_(__a: int) -> int:
    """Return the number of divisors of ``__a``.

    Uses trial-division prime factorisation: for n = p1^a1 * ... * pk^ak the
    divisor count is (a1 + 1) * ... * (ak + 1).

    Fix: the original bound every local to a throwaway name and then read the
    never-defined ``n_divisors``/``i``/``multiplicity`` (NameError).
    """
    n = __a
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        # Whatever remains is a single prime factor with multiplicity 1.
        n_divisors *= 2
    return n_divisors
def UpperCAmelCase_(limit: int = 500) -> int:
    """Return the first triangular number with more than ``limit`` divisors.

    Project Euler problem 12 (default ``limit`` of 500 matches the original
    hard-coded threshold; it is now a parameter for easier testing/reuse).

    Fix: the original read never-defined locals and called the nonexistent
    ``count_divisors``; the divisor counter is now a self-contained helper.
    """

    def _count_divisors(n: int) -> int:
        # Divisor count via prime factorisation: prod(multiplicity + 1).
        n_divisors = 1
        i = 2
        while i * i <= n:
            multiplicity = 0
            while n % i == 0:
                n //= i
                multiplicity += 1
            n_divisors *= multiplicity + 1
            i += 1
        if n > 1:
            n_divisors *= 2
        return n_divisors

    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i  # t_num walks the triangular numbers 3, 6, 10, ...
        if _count_divisors(t_num) > limit:
            break
    return t_num
if __name__ == "__main__":
    # Fix: the original called the undefined name ``solution``; the Project
    # Euler entry point in this module is ``UpperCAmelCase_``.
    print(UpperCAmelCase_())
| 229 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Fix: both module constants were assigned to the same name ``lowercase_``,
# so the archive map clobbered the logger. Restored distinct names.
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/trocr-base-handwritten': (
        'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class A_ ( PretrainedConfig ):
    r"""Configuration class for TrOCR-style decoder models.

    Stores the decoder hyper-parameters; ``attribute_map`` exposes the
    standard ``num_attention_heads`` / ``hidden_size`` / ``num_hidden_layers``
    aliases expected by the generic config API.

    NOTE(review): restored from a machine-mangled version in which the three
    class attributes all reassigned ``__snake_case`` and ``__init__`` declared
    every parameter as ``a`` (a SyntaxError). Default values are unchanged;
    the base class is the ``PretrainedConfig`` imported at the top of the file.
    """

    model_type = """trocr"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """num_attention_heads""": """decoder_attention_heads""",
        """hidden_size""": """d_model""",
        """num_hidden_layers""": """decoder_layers""",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 669 | 0 |
"""simple docstring"""
def lowerCamelCase_(UpperCamelCase__: int = 5000_0000) -> int:
    """Project Euler 87: count numbers below the limit expressible as
    prime**2 + prime**3 + prime**4.

    Fixes: the original read the never-defined ``limit``/``primes``/``ret``
    and reused one loop variable for all three nested loops; it also iterated
    an unordered ``set``, making the early ``break`` pruning unsound — the
    primes are now iterated in ascending order.
    """
    limit = UpperCamelCase__
    ret = set()
    # Largest prime whose square can still fit (24 = 2**3 + 2**4).
    prime_square_limit = int((limit - 24) ** (1 / 2))
    sieve = set(range(3, prime_square_limit + 1, 2))
    sieve.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in sieve:
            continue
        sieve.difference_update(set(range(p * p, prime_square_limit + 1, p)))
    primes = sorted(sieve)  # ascending order so the breaks below are sound
    for prime_1 in primes:
        square = prime_1 * prime_1
        for prime_2 in primes:
            cube = prime_2 * prime_2 * prime_2
            if square + cube >= limit - 16:  # 16 == 2**4, smallest 4th power
                break
            for prime_3 in primes:
                tetr = prime_3 * prime_3 * prime_3 * prime_3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)  # set membership de-duplicates repeats
    return len(ret)
if __name__ == "__main__":
    # Fix: the original formatted the undefined name ``solution``; the entry
    # point defined in this module is ``lowerCamelCase_``.
    print(f"{lowerCamelCase_() = }")
| 506 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A_ ( ProcessorMixin ):
    r"""Processor pairing a CLIP image processor with an XLM-Roberta tokenizer.

    NOTE(review): restored from a machine-mangled version in which every class
    attribute reassigned ``__snake_case``, ``__init__``/``__call__`` declared
    duplicate parameters named ``a`` (a SyntaxError), the two decode wrappers
    shared one name (the second shadowed the first), and intermediate results
    were bound to throwaway locals. The base class is the ``ProcessorMixin``
    imported at the top of the file.
    """

    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """CLIPImageProcessor"""
    tokenizer_class = ("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # Accept the deprecated `feature_extractor` kwarg as a fallback.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize ``text`` and/or preprocess ``images``.

        When both are given, the image ``pixel_values`` are merged into the
        text encoding; when only images are given a BatchEncoding is built
        from the image features.
        """
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names(self):
        # Union of tokenizer and image-processor input names, order-preserving
        # de-duplication via dict.fromkeys.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 669 | 0 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
# Fix: all three metric docstrings were assigned to the same name
# ``_UpperCamelCase`` while the decorator/MetricInfo below reference
# ``_CITATION`` / ``_DESCRIPTION`` / ``_KWARGS_DESCRIPTION``. String contents
# are unchanged.
_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n    references: List of references for each speech input.\n    predictions: List of transcriptions to score.\n    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n    (float): the word error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> wer = datasets.load_metric("wer")\n    >>> wer_score = wer.compute(predictions=predictions, references=references)\n    >>> print(wer_score)\n    0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE_ ( datasets.Metric ):
    """Word Error Rate metric backed by ``jiwer.compute_measures``.

    NOTE(review): restored from a machine-mangled version in which both
    methods were named ``__lowercase`` (the second shadowed the first) and the
    compute loop read never-assigned locals; ``datasets.Metric`` dispatches to
    ``_info`` / ``_compute``.
    """

    def _info(self):
        # Metric metadata: expects lists of prediction/reference strings.
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                    '''references''': datasets.Value('''string''' , id='''sequence''' ),
                } ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
                '''https://en.wikipedia.org/wiki/Word_error_rate''',
            ] , )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        """Return corpus-level WER; optionally score the inputs as one
        concatenated text instead of accumulating per pair."""
        if concatenate_texts:
            return compute_measures(references , predictions )["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions , references ):
                measures = compute_measures(reference , prediction )
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
| 179 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
    """Fast CPU test for the unconditional LDMPipeline.

    NOTE(review): machine-mangled — the three fixture properties and the test
    method are all named ``_snake_case`` (later defs shadow earlier ones) and
    bodies bind results to ``__lowerCamelCase`` while returning/reading other,
    never-defined names (``model``, ``ldm``, ``a`` ...).
    """
    @property
    def _snake_case ( self: int ):
        # Small deterministic UNet fixture.
        torch.manual_seed(0 )
        __lowerCamelCase : int = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
        return model
    @property
    def _snake_case ( self: str ):
        # Small deterministic VQ-VAE fixture.
        torch.manual_seed(0 )
        __lowerCamelCase : Any = VQModel(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
        return model
    @property
    def _snake_case ( self: Dict ):
        # Small deterministic CLIP text-encoder fixture.
        torch.manual_seed(0 )
        __lowerCamelCase : Tuple = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModel(a )
    def _snake_case ( self: List[str] ):
        # Two-step inference; checks an output slice and that return_dict and
        # tuple outputs agree within tolerance.
        __lowerCamelCase : Union[str, Any] = self.dummy_uncond_unet
        __lowerCamelCase : List[str] = DDIMScheduler()
        __lowerCamelCase : str = self.dummy_vq_model
        __lowerCamelCase : Optional[int] = LDMPipeline(unet=a , vqvae=a , scheduler=a )
        ldm.to(a )
        ldm.set_progress_bar_config(disable=a )
        __lowerCamelCase : Optional[int] = torch.manual_seed(0 )
        __lowerCamelCase : Any = ldm(generator=a , num_inference_steps=2 , output_type='numpy' ).images
        __lowerCamelCase : Tuple = torch.manual_seed(0 )
        __lowerCamelCase : Dict = ldm(generator=a , num_inference_steps=2 , output_type='numpy' , return_dict=a )[0]
        __lowerCamelCase : Union[str, Any] = image[0, -3:, -3:, -1]
        __lowerCamelCase : int = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        __lowerCamelCase : Optional[int] = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] )
        __lowerCamelCase : str = 1e-2 if torch_device != 'mps' else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class A_ ( unittest.TestCase ):
    """Slow test: pretrained CelebA-HQ LDM, 5-step inference slice check.

    NOTE(review): mangled like the class above (throwaway ``__lowerCamelCase``
    bindings, undefined ``ldm``/``a``); it also reuses the name ``A_`` and so
    shadows the fast-test class at module scope.
    """
    def _snake_case ( self: Optional[int] ):
        __lowerCamelCase : int = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
        ldm.to(a )
        ldm.set_progress_bar_config(disable=a )
        __lowerCamelCase : Dict = torch.manual_seed(0 )
        __lowerCamelCase : int = ldm(generator=a , num_inference_steps=5 , output_type='numpy' ).images
        __lowerCamelCase : List[str] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        __lowerCamelCase : List[Any] = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] )
        __lowerCamelCase : Union[str, Any] = 1e-2 if torch_device != 'mps' else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 669 | 0 |
'''simple docstring'''
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __UpperCAmelCase ( ModelMixin , ConfigMixin ):
    """Projects CLIP image/prompt embeddings into the conditioning inputs of
    an unCLIP-style decoder UNet (additive time embeddings plus extra
    cross-attention context tokens).

    NOTE(review): restored from a machine-mangled version whose ``__init__``
    declared four keyword-only parameters all named ``_lowerCamelCase`` (a
    SyntaxError) and listed the same base class twice (a TypeError), and whose
    forward pass bound every result to a throwaway local instead of ``self``
    attributes. Names follow the attribute reads visible in the forward pass.
    """

    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()
        # Learned stand-in for the image embedding on the unconditional half
        # of classifier-free guidance.
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim ) )
        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim , time_embed_dim )
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim , time_embed_dim )
        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim , self.clip_extra_context_tokens * cross_attention_dim )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim , cross_attention_dim )
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim )

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size , -1 )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]
        batch_size = prompt_embeds.shape[0]
        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds )
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings )
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings )
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size , -1 , self.clip_extra_context_tokens )
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0 , 2 , 1 )
        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states )
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states )
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
        return text_encoder_hidden_states, additive_clip_time_embeddings

    # Backward-compatible alias for the mangled public method name.
    UpperCAmelCase_ = forward
| 274 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
usage_doc = "Usage of script: script_name <size_of_canvas:int>"

# ~9% chance of a live cell; shuffled once at import time.
choice = [0] * 100 + [1] * 10
random.shuffle(choice)


def create_canvas(size: int) -> list[list[bool]]:
    """Create a size x size grid of dead (False) cells."""
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    """Randomly seed the canvas in place with live/dead cells."""
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """Advance the canvas one generation and return the new state.

    Border cells see a truncated neighbourhood: the ``r - 1`` of the first
    row/column makes the slice start negative and yields an empty window
    rather than wrapping around.
    """
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )
    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    """Apply Conway's rules to one cell given its (truncated) neighbourhood."""
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1
    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1
    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True
    return state


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)
    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
| 669 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
A = logging.get_logger(__name__)
class __snake_case ( DonutImageProcessor ):
    """Deprecated alias kept for backward compatibility.

    Instantiating it behaves exactly like ``DonutImageProcessor`` (the class
    imported at the top of this file) but emits a deprecation warning.

    Fix: the original declared ``*A, **A`` (duplicate parameter name, a
    SyntaxError) and passed that parameter as the warning category; the
    category is restored to ``FutureWarning``.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            'The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DonutImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs )
| 320 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class A_ ( ExplicitEnum ):
    """Supported decoding strategies for MGP-STR model outputs.

    Fix: the original assigned all three values to one reassigned attribute
    (``__snake_case``) and the tuple below referenced the undefined name
    ``DecodeType``; members and references are restored.
    """

    CHARACTER = """char"""
    BPE = """bpe"""
    WORDPIECE = """wp"""


# All supported decode types, in priority order.
lowercase_ = (A_.CHARACTER, A_.BPE, A_.WORDPIECE)
class A_ ( __UpperCamelCase ):
'''simple docstring'''
__snake_case = ["""image_processor""", """char_tokenizer"""]
__snake_case = """ViTImageProcessor"""
__snake_case = """MgpstrTokenizer"""
def __init__( self: int , a: Dict=None , a: Optional[int]=None , **a: List[str] ):
__lowerCamelCase : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , a , )
__lowerCamelCase : Optional[Any] = kwargs.pop('feature_extractor' )
__lowerCamelCase : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
__lowerCamelCase : Any = tokenizer
__lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained('gpt2' )
__lowerCamelCase : int = AutoTokenizer.from_pretrained('bert-base-uncased' )
super().__init__(a , a )
def __call__( self: Optional[int] , a: Optional[int]=None , a: List[Any]=None , a: int=None , **a: str ):
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
__lowerCamelCase : Dict = self.image_processor(a , return_tensors=a , **a )
if text is not None:
__lowerCamelCase : Dict = self.char_tokenizer(a , return_tensors=a , **a )
if text is None:
return inputs
elif images is None:
return encodings
else:
__lowerCamelCase : List[str] = encodings['input_ids']
return inputs
def _snake_case ( self: List[str] , a: List[Any] ):
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = sequences
__lowerCamelCase : List[str] = char_preds.size(0 )
__lowerCamelCase , __lowerCamelCase : str = self._decode_helper(a , 'char' )
__lowerCamelCase , __lowerCamelCase : Optional[int] = self._decode_helper(a , 'bpe' )
__lowerCamelCase , __lowerCamelCase : Optional[Any] = self._decode_helper(a , 'wp' )
__lowerCamelCase : Tuple = []
__lowerCamelCase : List[Any] = []
for i in range(a ):
__lowerCamelCase : List[Any] = [char_scores[i], bpe_scores[i], wp_scores[i]]
__lowerCamelCase : Optional[int] = [char_strs[i], bpe_strs[i], wp_strs[i]]
__lowerCamelCase : Any = scores.index(max(a ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
__lowerCamelCase : List[str] = {}
__lowerCamelCase : Optional[int] = final_strs
__lowerCamelCase : Dict = final_scores
__lowerCamelCase : Dict = char_strs
__lowerCamelCase : List[Any] = bpe_strs
__lowerCamelCase : Tuple = wp_strs
return out
def _snake_case ( self: int , a: Optional[int] , a: Optional[Any] ):
if format == DecodeType.CHARACTER:
__lowerCamelCase : Optional[Any] = self.char_decode
__lowerCamelCase : Union[str, Any] = 1
__lowerCamelCase : List[str] = '[s]'
elif format == DecodeType.BPE:
__lowerCamelCase : Dict = self.bpe_decode
__lowerCamelCase : List[str] = 2
__lowerCamelCase : Any = '#'
elif format == DecodeType.WORDPIECE:
__lowerCamelCase : List[str] = self.wp_decode
__lowerCamelCase : int = 102
__lowerCamelCase : Dict = '[SEP]'
else:
raise ValueError(F'Format {format} is not supported.' )
__lowerCamelCase , __lowerCamelCase : int = [], []
__lowerCamelCase : Tuple = pred_logits.size(0 )
__lowerCamelCase : List[Any] = pred_logits.size(1 )
__lowerCamelCase , __lowerCamelCase : Dict = pred_logits.topk(1 , dim=-1 , largest=a , sorted=a )
__lowerCamelCase : List[str] = preds_index.view(-1 , a )[:, 1:]
__lowerCamelCase : Dict = decoder(a )
__lowerCamelCase , __lowerCamelCase : Optional[Any] = torch.nn.functional.softmax(a , dim=2 ).max(dim=2 )
__lowerCamelCase : List[str] = preds_max_prob[:, 1:]
for index in range(a ):
__lowerCamelCase : str = preds_str[index].find(a )
__lowerCamelCase : Tuple = preds_str[index][:pred_eos]
__lowerCamelCase : Any = preds_index[index].cpu().tolist()
__lowerCamelCase : Any = pred_index.index(a ) if eos_token in pred_index else -1
__lowerCamelCase : str = preds_max_prob[index][: pred_eos_index + 1]
__lowerCamelCase : Union[str, Any] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(a )
conf_scores.append(a )
return dec_strs, conf_scores
def _snake_case ( self: Tuple , a: Optional[int] ):
__lowerCamelCase : Dict = [seq.replace(' ' , '' ) for seq in self.char_tokenizer.batch_decode(a )]
return decode_strs
def _snake_case ( self: Optional[int] , a: Tuple ):
return self.bpe_tokenizer.batch_decode(a )
def _snake_case ( self: Optional[int] , a: List[Any] ):
__lowerCamelCase : int = [seq.replace(' ' , '' ) for seq in self.wp_tokenizer.batch_decode(a )]
return decode_strs
# (removed stray dataset-separator artifact "| 669 | 0 |")
def A__(input_a: int, input_b: int) -> int:
    """OR gate: return 1 if either input is 1, else 0.

    NOTE(review): the obfuscated original declared both parameters as
    ``snake_case`` (a SyntaxError) and referenced ``input_a`` twice.
    """
    return int((input_a, input_b).count(1) != 0)


# Stable alias: the self-test and the __main__ demo below call `or_gate`,
# and the duplicated `A__` definition that follows re-binds that name.
or_gate = A__


def A__() -> None:  # noqa: F811 -- mirrors the original's duplicated name
    """Exhaustive truth-table self-test for the OR gate."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
# (removed stray dataset-separator artifact "| 279 |")
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()

# NOTE(review): names restored -- the original assigned both the logger and the
# class map to `lowercase_`, while the converter below references `logger` and
# `TOKENIZER_CLASSES`.
logger = logging.get_logger(__name__)

# Map each convertible slow tokenizer name to its fast class,
# e.g. "BertTokenizer" -> transformers.BertTokenizerFast.
TOKENIZER_CLASSES = {name: getattr(transformers, name + 'Fast') for name in SLOW_TO_FAST_CONVERTERS}
def UpperCamelCase__(tokenizer_name, checkpoint_name, dump_path, force_download):
    """Convert slow tokenizer checkpoints to fast `tokenizer.json` files under `dump_path`.

    Args:
        tokenizer_name: a key of TOKENIZER_CLASSES, or None to convert all of them.
        checkpoint_name: a specific checkpoint, or None for each class's canonical ones.
        dump_path: output directory for the generated fast tokenizer files.
        force_download: re-download checkpoints even if cached.

    NOTE(review): reconstructed -- the original declared all four parameters as
    `SCREAMING_SNAKE_CASE__` (a SyntaxError) and collapsed every local binding.
    """
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.')

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + 'Fast')}

    logger.info(f'Loading tokenizer classes: {tokenizer_names}')

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f'For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}')

        for checkpoint in checkpoint_names:
            logger.info(f'Loading {tokenizer_class.__class__.__name__} {checkpoint}')

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}')

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split('/')
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}')

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}')

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name)
            logger.info(f'=> File names {file_names}')

            for file_name in file_names:
                # Keep only the fast `tokenizer.json`; drop the re-saved slow files.
                if not file_name.endswith('tokenizer.json'):
                    os.remove(file_name)
                    logger.info(f'=> removing {file_name}')


# Restore the public name the __main__ block below invokes.
convert_slow_checkpoint_to_fast = UpperCamelCase__
if __name__ == "__main__":
    # NOTE(review): `parser` and `args` were both collapsed onto `lowercase_`
    # in the obfuscated original, so `parser.add_argument` raised NameError.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--dump_path', default=None, type=str, required=True, help='Path to output generated fast tokenizer files.'
    )
    parser.add_argument(
        '--tokenizer_name',
        default=None,
        type=str,
        help=(
            f"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
            'download and convert all the checkpoints from AWS.'
        ),
    )
    parser.add_argument(
        '--checkpoint_name',
        default=None,
        type=str,
        help='Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.',
    )
    parser.add_argument(
        '--force_download',
        action='store_true',
        help='Re-download checkpoints.',
    )
    args = parser.parse_args()
    # NOTE(review): the converter function above is (obfuscated as) `UpperCamelCase__`;
    # the original called the then-undefined `convert_slow_checkpoint_to_fast`.
    UpperCamelCase__(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
# (removed stray dataset-separator artifact "| 669 | 0 |")
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
# NOTE(review): the original assigned all three test constants to the single
# name `UpperCAmelCase_` (last writer wins), while the tests reference URL,
# CONTENT and HASH.  HASH is the sha256 cache filename derived from URL.
URL = "http://www.mocksite.com/file1.txt"
CONTENT = "\"text\": [\"foo\", \"foo\"]"
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
UpperCAmelCase_ = HASH  # preserve the original module-level binding
class __UpperCamelCase:
    """Minimal stand-in for `requests.Response` used by the download tests.

    NOTE(review): the original collapsed `status_code`, `headers` and `cookies`
    onto one triple-assigned name (`__A`) and returned `bytes(<kwargs dict>,
    "utf-8")` (a TypeError); names restored to the `requests` API the download
    code reads.
    """

    status_code = 200
    headers = {'''Content-Length''': '''100'''}
    cookies = {}

    def iter_content(self, **kwargs):
        # Whole payload in one chunk; literal mirrors the module CONTENT constant.
        return [bytes('"text": ["foo", "foo"]', '''utf-8''')]

    # Keep the original (obfuscated) method name callable as well.
    UpperCamelCase = iter_content
def A__(*args, **kwargs):
    """Stand-in for `requests.request`: always answer with the canned mock response.

    NOTE(review): the original returned the undefined name `MockResponse`; the
    mock response class in this module is (obfuscated as) `__UpperCamelCase`.
    """
    return __UpperCamelCase()
@pytest.mark.parametrize("urls_type", [str, list, dict])
def A__(monkeypatch, urls_type, tmp_path):
    """Download a mocked URL via DownloadManager and verify the cache layout.

    NOTE(review): reconstructed -- the original declared all three parameters
    as `SCREAMING_SNAKE_CASE_` (a SyntaxError) and referenced the undefined
    `SCREAMING_SNAKE_CASE__` throughout the body.
    """
    import requests

    # The module-level mock factory is shadowed by later duplicate `A__`
    # definitions, so patch with an inline factory over the mock response class.
    monkeypatch.setattr(requests, "request", lambda *args, **kwargs: __UpperCamelCase())

    # Literals mirror the (collapsed) module constants URL / CONTENT / HASH.
    url = "http://www.mocksite.com/file1.txt"
    expected_content = '"text": ["foo", "foo"]'
    expected_hash = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"

    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir), use_etag=False, )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    # Normalize the three parametrized shapes (str / list / dict) to parallel lists.
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == expected_hash
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == expected_content
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": url, "etag": None}
@pytest.mark.parametrize("paths_type", [str, list, dict])
def A__(paths_type, xz_file, text_file):
    """Extract a compressed fixture via DownloadManager and verify layout/content.

    NOTE(review): reconstructed -- the original declared all three parameters
    as `SCREAMING_SNAKE_CASE_` (a SyntaxError); fixture roles recovered from
    the body (`xz_file.parent`, `text_file.read_text()`).
    """
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir, use_etag=False, )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    # Normalize the three parametrized shapes (str / list / dict) to parallel lists.
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def A__(path, file):
    """Shared check: `file` yields exactly 4 JSONL records with columns col_1..col_3.

    NOTE(review): upstream this helper is `_test_jsonl` (which the archive tests
    below still call); the obfuscation renamed it to yet another duplicated
    `A__` and declared both parameters with the same name (a SyntaxError).
    """
    assert path.endswith('''.jsonl''')
    num_items = 0  # guard: defined even if `file` is empty
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode('''utf-8'''))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def A__(archive_jsonl, request):
    """Iterate a tar/zip archive of JSONL files and validate each member.

    NOTE(review): parameters restored -- the original duplicated the name
    `SCREAMING_SNAKE_CASE_` (a SyntaxError).  `_test_jsonl` is the upstream name
    of the checker defined above (obfuscated here as another `A__`) -- confirm
    the helper is reachable under that name.
    """
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2
@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def A__(archive_nested_jsonl, request):
    """Iterate an archive-of-archives of JSONL files and validate each inner member.

    NOTE(review): parameters restored -- the original duplicated the name
    `SCREAMING_SNAKE_CASE_` (a SyntaxError) and passed undefined names into the
    nested `iter_archive` call.
    """
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2
def A__(data_dir_with_hidden_files):
    """Iterate a data directory, skipping hidden files; expect test.txt then train.txt.

    NOTE(review): the original body referenced the undefined
    `SCREAMING_SNAKE_CASE__` instead of the parameter / loop variable, and the
    final assert line had a stray "| 32 |" artifact fused onto it.
    """
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# Local SentencePiece fixture (no BOS token) used to build the test tokenizers below.
lowercase_ = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class A_ ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenizer tests for `google/pegasus-large` (slow + fast), built on a local fixture vocab.

    NOTE(review): restored from the upstream transformers Pegasus tokenizer tests --
    the obfuscated original collapsed the four class attributes onto `__snake_case`,
    named every method `_snake_case` (breaking unittest discovery and the mixin
    contract), inherited from the wrong base, and used the undefined name `a` where
    literals/locals belonged (e.g. `PegasusTokenizer(a)` in setUp).
    """

    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(lowercase_)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained('google/pegasus-large')

    def get_tokenizer(self, **kwargs):
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        token = '</s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<pad>')
        self.assertEqual(vocab_keys[1], '</s>')
        self.assertEqual(vocab_keys[-1], 'v')
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
            ' </s> <pad> <pad> <pad>'
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = 'To ensure a smooth flow of bank resolutions.'
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ['This is going to be way too long.' * 150, 'short example']
        tgt_texts = ['not super long but more than 5 tokens', 'tiny']
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors='pt')
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors='pt')
        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name='google/bigbird-pegasus-large-arxiv', revision='ba85d0851d708441f91440d509690f1ab6353415', )
@require_sentencepiece
@require_tokenizers
class A_ ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenizer tests for `google/bigbird-pegasus-large-arxiv` ([MASK]-style masking).

    NOTE(review): restored from the upstream transformers tests -- the obfuscated
    original collapsed the class attributes onto `__snake_case`, named every
    method `_snake_case`, inherited from the wrong base, and passed the undefined
    name `a` into `PegasusTokenizer(...)` in setUp.
    """

    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(lowercase_, offset=0, mask_token_sent=None, mask_token='[MASK]')
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv')

    def get_tokenizer(self, **kwargs):
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
            ' <pad> <pad> <pad>'
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ['This is going to be way too long.' * 1000, 'short example']
        tgt_texts = ['not super long but more than 5 tokens', 'tiny']
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors='pt')
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors='pt')
        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        raw_input_str = (
            'This is an example string that is used to test the original TF implementation against the HF'
            ' implementation'
        )
        token_ids = self._large_tokenizer(raw_input_str).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1], )
# (removed stray dataset-separator artifact "| 669 | 0 |")
"""simple docstring"""
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
# NOTE(review): the original assigned all three metric docstrings to the single
# name `__lowerCamelCase`, while the decorator below references `_DESCRIPTION`
# and `_KWARGS_DESCRIPTION` (and `_info` references `_CITATION`).
_CITATION = "\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n"
_DESCRIPTION = "\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.\n"
_KWARGS_DESCRIPTION = R"\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting \"1/2\" to \"\\frac{1}{2}\")\n\nExamples:\n >>> metric = datasets.load_metric(\"competition_math\")\n >>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])\n >>> print(results)\n {\'accuracy\': 1.0}\n"
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _snake_case(datasets.Metric):
    """Accuracy on the MATH dataset after canonicalizing the LaTeX answers."""

    def _info(self):
        # `datasets.Metric` contract: declare features and point at the upstream repo.
        # NOTE(review): the original named both methods `snake_case_`, so the
        # second definition clobbered the first; `_info`/`_compute` are the
        # names the datasets machinery actually invokes.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string'''),
                    '''references''': datasets.Value('''string'''),
                }
            ),
            homepage='''https://github.com/hendrycks/math''',
            codebase_urls=['''https://github.com/hendrycks/math'''],
        )

    def _compute(self, predictions, references):
        """Fraction of predictions equivalent to their reference after canonicalization.

        NOTE(review): the original declared both parameters as `snake_case`
        (a SyntaxError).
        """
        n_correct = 0.0
        for pred, ref in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(pred, ref) else 0.0
        accuracy = n_correct / len(references)
        return {
            "accuracy": accuracy,
        }
# (removed stray dataset-separator artifact "| 608 |")
def UpperCamelCase__(density, bulk_modulus):
    """Speed of sound in a fluid: c = sqrt(K / rho).

    Args:
        density: fluid density rho (kg/m^3), must be > 0.
        bulk_modulus: bulk modulus K (Pa), must be > 0.

    Raises:
        ValueError: if either argument is non-positive.

    NOTE(review): the original declared both parameters as
    `SCREAMING_SNAKE_CASE__` (a SyntaxError) while the body referenced
    `density` and `bulk_modulus`.
    """
    if density <= 0:
        raise ValueError('Impossible fluid density')
    if bulk_modulus <= 0:
        raise ValueError('Impossible bulk modulus')

    return (bulk_modulus / density) ** 0.5
# Run any doctests when executed as a script (no-op while the module has none).
if __name__ == "__main__":
    import doctest
    doctest.testmod()
# (removed stray dataset-separator artifact "| 669 | 0 |")
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    """Builds tiny ALBERT configs/inputs and checks each head's output shapes.

    NOTE(review): restored from the obfuscated original, whose duplicated
    parameter names (`UpperCAmelCase_`) were a SyntaxError and whose methods
    were all named `_a`; the class name matches the `AlbertModelTester(self)`
    call in the test case below.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random tiny input tensors plus a matching config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Tile each input across the choice dimension: (batch, seq) -> (batch, choices, seq).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class snake_case (__UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
    # Albert test suite wired into the common model-tester / pipeline-tester mixins.
    # NOTE(review): both tuple attributes below are bound to the same name
    # `lowerCAmelCase__`, so the second assignment shadows the first, and every
    # test method is named `_a`, so only the last definition survives class
    # creation. In the upstream suite these are `all_model_classes`,
    # `pipeline_model_mapping`, `fx_compatible` and individual `test_*` methods
    # -- reconstruct the names before trusting this class.
    lowerCAmelCase__ :List[Any] = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    lowerCAmelCase__ :Dict = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    lowerCAmelCase__ :Optional[int] = True

    # NOTE(review): duplicate `UpperCAmelCase_` parameters are a SyntaxError,
    # and the computed label tensors are bound to a throwaway local instead of
    # entries in `inputs_dict`; `model_class` is read but never bound.
    def _a ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_=False ) -> List[Any]:
        lowercase__ = super()._prepare_for_class(UpperCAmelCase_ ,UpperCAmelCase_ ,return_labels=UpperCAmelCase_ )
        if return_labels:
            if model_class in get_values(UpperCAmelCase_ ):
                lowercase__ = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=UpperCAmelCase_ )
                lowercase__ = torch.zeros(
                    self.model_tester.batch_size ,dtype=torch.long ,device=UpperCAmelCase_ )
        return inputs_dict

    def _a ( self ) -> List[str]:
        # Presumably a setUp: builds the shared model tester and config tester,
        # but both are bound to the same throwaway local here.
        lowercase__ = AlbertModelTester(self )
        lowercase__ = ConfigTester(self ,config_class=UpperCAmelCase_ ,hidden_size=37 )

    def _a ( self ) -> Optional[Any]:
        self.config_tester.run_common_tests()

    def _a ( self ) -> List[Any]:
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase_ )

    def _a ( self ) -> str:
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase_ )

    def _a ( self ) -> List[Any]:
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ )

    def _a ( self ) -> str:
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase_ )

    def _a ( self ) -> str:
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ )

    def _a ( self ) -> List[str]:
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase_ )

    def _a ( self ) -> List[Any]:
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            # NOTE(review): this assignment discards `type` instead of writing
            # the position-embedding type into the prepared config.
            lowercase__ = type
            self.model_tester.create_and_check_model(*UpperCAmelCase_ )

    @slow
    def _a ( self ) -> str:
        # Smoke-test loading the first published checkpoint.
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase__ = AlbertModel.from_pretrained(UpperCAmelCase_ )
            self.assertIsNotNone(UpperCAmelCase_ )
@require_torch
class snake_case (unittest.TestCase ):
    """Integration test: run albert-base-v2 on a fixed input and compare a slice
    of the hidden states against golden values."""

    @slow
    def _a ( self ) -> Tuple:
        # NOTE(review): local bindings restored -- the obfuscated original
        # collapsed them into `lowercase__` and then read undefined names.
        model = AlbertModel.from_pretrained("albert-base-v2" )
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            # First element of the model output is the last hidden state.
            output = model(input_ids ,attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape ,expected_shape )
        # Golden hidden-state slice, checked at 1e-4 absolute tolerance.
        expected_slice = torch.tensor(
            [[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,expected_slice ,atol=1E-4 ) )
| 267 |
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
    """Return the number of divisors of a positive integer.

    Uses trial division: for n = prod(p_k^m_k), d(n) = prod(m_k + 1).
    """
    # NOTE(review): local bindings restored -- the obfuscated original
    # collapsed every name, leaving n/i/multiplicity undefined (NameError).
    n = SCREAMING_SNAKE_CASE__
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        # Any leftover factor above sqrt(n) is a prime with multiplicity 1.
        n_divisors *= 2
    return n_divisors
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__=500 ):
    """Project Euler 12: first triangle number with more than the given number
    of divisors (default 500, matching the original hard-coded threshold).

    The threshold is now a parameter with the old value as default, so existing
    zero-argument callers are unaffected.
    """

    def _count_divisors(n):
        # Divisor count via trial division; inlined because the sibling helper
        # in this file shares this function's (obfuscated) name and is shadowed.
        divisors = 1
        i = 2
        while i * i <= n:
            multiplicity = 0
            while n % i == 0:
                n //= i
                multiplicity += 1
            divisors *= multiplicity + 1
            i += 1
        if n > 1:
            divisors *= 2
        return divisors

    i = 1
    t_num = 1
    # Walk the triangle numbers 1, 3, 6, 10, ... until one exceeds the threshold.
    while _count_divisors(t_num) <= SCREAMING_SNAKE_CASE__:
        i += 1
        t_num += i
    return t_num


if __name__ == "__main__":
    # NOTE(review): fixed -- the original called the undefined name `solution`.
    print(UpperCamelCase__())
| 669 | 0 |
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : int) -> int:
    '''Return the (n-1)-th Catalan number for input n, so f(1) == f(2) == 1,
    f(3) == 2, f(4) == 5, f(5) == 14.

    Uses the recurrence C_k = C_{k-1} * (4k - 2) // (k + 1).

    Raises:
        TypeError: if the input is not an integer.
        ValueError: if the input is < 1.
    '''
    # NOTE(review): the original body read the undefined names `number` and
    # `SCREAMING_SNAKE_CASE__`; rewired to the actual parameter. The bogus
    # `str` annotations were also corrected to `int`.
    number = _lowerCamelCase
    if not isinstance(number , int):
        msg = F'Input value of [number={number}] must be an integer'
        raise TypeError(msg)
    if number < 1:
        msg = F'Input value of [number={number}] must be > 0'
        raise ValueError(msg)
    current_number = 1
    for i in range(1 , number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import numpy as np
class A_ :
'''simple docstring'''
def __init__( self: Optional[int] ):
__lowerCamelCase : int = (0, 0)
__lowerCamelCase : List[str] = None
__lowerCamelCase : int = 0
__lowerCamelCase : int = 0
__lowerCamelCase : Union[str, Any] = 0
def __eq__( self: Optional[int] , a: List[Any] ):
return self.position == cell.position
def _snake_case ( self: Any ):
print(self.position )
class A_ :
'''simple docstring'''
def __init__( self: str , a: List[str]=(5, 5) ):
__lowerCamelCase : Optional[Any] = np.zeros(a )
__lowerCamelCase : List[str] = world_size[0]
__lowerCamelCase : Optional[int] = world_size[1]
def _snake_case ( self: List[Any] ):
print(self.w )
def _snake_case ( self: Optional[int] , a: str ):
__lowerCamelCase : Tuple = [
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
__lowerCamelCase : Optional[int] = cell.position[0]
__lowerCamelCase : List[str] = cell.position[1]
__lowerCamelCase : Dict = []
for n in neughbour_cord:
__lowerCamelCase : Dict = current_x + n[0]
__lowerCamelCase : Optional[Any] = current_y + n[1]
if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
__lowerCamelCase : Optional[Any] = Cell()
__lowerCamelCase : Any = (x, y)
__lowerCamelCase : Dict = cell
neighbours.append(a )
return neighbours
def UpperCamelCase__ ( world , start , goal ):
    """Best-first search on ``world`` from ``start`` to ``goal``.

    Repeatedly expands the open node with the lowest f = g + h (h is the
    squared Euclidean distance to the goal) and finally reconstructs the path
    by walking parent links back from the goal.

    NOTE(review): parameter and local names restored -- the obfuscated
    original repeated one name for all three parameters (a SyntaxError) and
    collapsed the coordinate unpacking, which made the heuristic identically
    zero and left n.g/n.h/n.f unset.
    """
    _open = []
    _closed = []
    _open.append(start )
    while _open:
        # Pop the open node with the smallest total cost f.
        min_f = np.argmin([n.f for n in _open] )
        current = _open[min_f]
        _closed.append(_open.pop(min_f ) )
        if current == goal:
            break
        for n in world.get_neigbours(current ):
            # NOTE(review): upstream quirk kept as-is -- this loop has no
            # effect (`continue` only skips the inner iteration), so closed
            # nodes are re-expanded.
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            xa, ya = n.position
            xb, yb = goal.position
            # Squared Euclidean distance heuristic.
            n.h = (yb - ya) ** 2 + (xb - xa) ** 2
            n.f = n.h + n.g
            # NOTE(review): same upstream quirk -- duplicates are appended.
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n )
    # Walk parent links back from the goal, then reverse into start->goal order.
    path = []
    while current.parent is not None:
        path.append(current.position )
        current = current.parent
    path.append(current.position )
    return path[::-1]
if __name__ == "__main__":
    # NOTE(review): none of these names resolve -- the two classes above are
    # both bound to `A_` (the second shadowing the first) and the search
    # routine to `UpperCamelCase__`, so `Gridworld`, `Cell`, `astar`, `start`,
    # `goal`, `world` and `s` all raise NameError here. Restore the intended
    # bindings before running this demo as a script.
    lowercase_ = Gridworld()
    # Start position and goal
    lowercase_ = Cell()
    lowercase_ = (0, 0)
    lowercase_ = Cell()
    lowercase_ = (4, 4)
    print(F"""path from {start.position} to {goal.position}""")
    lowercase_ = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        lowercase_ = 1
    print(world.w)
| 669 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger for this configuration module.
_snake_case : List[str] = logging.get_logger(__name__)
# Map of canonical MobileNetV2 checkpoints to their hosted config files.
# NOTE(review): this dict is bound to the same name as the logger above, so
# the logger binding is shadowed by this assignment.
_snake_case : List[str] = {
    'google/mobilenet_v2_1.4_224': 'https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json',
    'google/mobilenet_v2_1.0_224': 'https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json',
    'google/mobilenet_v2_0.75_160': 'https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json',
    'google/mobilenet_v2_0.35_96': 'https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json',
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class _UpperCAmelCase ( __UpperCamelCase ):
    """Configuration for MobileNetV2 models.

    Stores the architecture hyper-parameters as attributes; extra keyword
    arguments are forwarded to the base config class.
    """

    a_ = """mobilenet_v2"""

    # NOTE(review): the obfuscated original gave every parameter the same name
    # (duplicate arguments are a SyntaxError) and assigned each value to one
    # throwaway local instead of an attribute, so no configuration was ever
    # stored; parameter names restored from the upstream MobileNetV2 config.
    def __init__( self , num_channels=3 , image_size=2_2_4 , depth_multiplier=1.0 , depth_divisible_by=8 , min_depth=8 , expand_ratio=6 , output_stride=3_2 , first_layer_is_expansion=True , finegrained_output=True , hidden_act="relu6" , tf_padding=True , classifier_dropout_prob=0.8 , initializer_range=0.02 , layer_norm_eps=0.0_01 , semantic_loss_ignore_index=2_5_5 , **kwargs , ):
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            # A non-positive multiplier would collapse every layer width to zero.
            raise ValueError('depth_multiplier must be greater than zero.' )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class _UpperCAmelCase ( __UpperCamelCase ):
    """ONNX export configuration: input/output specs and validation tolerance."""

    a_ = version.parse("""1.11""" )

    @property
    def lowercase ( self : Union[str, Any] ) -> Any:
        # Single image input; only the batch axis (0) is dynamic.
        return OrderedDict([('pixel_values', {0: 'batch'})] )

    @property
    def lowercase ( self : int ) -> int:
        # Classification exports expose logits; bare encoders expose the
        # hidden states plus the pooled output.
        if self.task == "image-classification":
            return OrderedDict([('logits', {0: 'batch'})] )
        return OrderedDict(
            [('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )

    @property
    def lowercase ( self : List[str] ) -> Dict:
        # Absolute tolerance used when validating exported model outputs.
        return 1e-4
| 53 |
import math
from datetime import datetime, timedelta
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : Tuple = year % 19
__lowerCamelCase : int = year % 4
__lowerCamelCase : Any = year % 7
__lowerCamelCase : Dict = math.floor(year / 100 )
__lowerCamelCase : str = math.floor((13 + 8 * leap_day_inhibits) / 25 )
__lowerCamelCase : Optional[int] = leap_day_inhibits / 4
__lowerCamelCase : str = (
15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 30
__lowerCamelCase : Optional[Any] = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
__lowerCamelCase : Optional[int] = (19 * metonic_cycle + secular_moon_shift) % 30
# PHM -> Paschal Full Moon
__lowerCamelCase : Tuple = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 29 and days_from_phm_to_sunday == 6:
return datetime(SCREAMING_SNAKE_CASE__ , 4 , 19 )
elif days_to_add == 28 and days_from_phm_to_sunday == 6:
return datetime(SCREAMING_SNAKE_CASE__ , 4 , 18 )
else:
return datetime(SCREAMING_SNAKE_CASE__ , 3 , 22 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
lowercase_ = 'will be' if year > datetime.now().year else 'was'
print(F"""Easter in {year} {tense} {gauss_easter(year)}""")
| 669 | 0 |
'''simple docstring'''
from manim import *
class UpperCAmelCase__ ( __UpperCamelCase ):
    """Manim scene: animates an input flowing through a model whose weights are
    shuttled between disk, CPU and GPU layer by layer."""

    # NOTE(review): throughout this method the obfuscation collapsed most
    # local bindings into the single name `_a`; later reads of `mem`,
    # `meta_mem`, `fill`, `model_arr`, `model_cpu_arr`, `model_base`,
    # `cpu_left_col_base`, `gpu_rect`, `key_text`, `step_a`, `a`, `a_c` and
    # friends are therefore undefined (NameError). The comments below describe
    # the apparent intent; restore the bindings before running the scene.
    def __lowercase ( self : List[str] ):
        """Build the CPU/GPU/disk layout and play the offload animation."""
        # Basic building blocks: memory cells, meta cells and fill rectangles.
        _a : str = Rectangle(height=0.5 ,width=0.5 )
        _a : Tuple = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
        _a : Dict = Rectangle(height=0.25 ,width=0.25 )
        # CPU block: two stacked rows of six cells plus a label.
        _a : List[Any] = [mem.copy() for i in range(6 )]
        _a : Union[str, Any] = [mem.copy() for i in range(6 )]
        _a : Optional[int] = VGroup(*_a ).arrange(_a ,buff=0 )
        _a : int = VGroup(*_a ).arrange(_a ,buff=0 )
        _a : Union[str, Any] = VGroup(_a ,_a ).arrange(_a ,buff=0 )
        _a : List[str] = Text('CPU' ,font_size=24 )
        _a : Optional[Any] = Group(_a ,_a ).arrange(_a ,buff=0.5 ,aligned_edge=_a )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(_a )
        # GPU block: a single row of four cells plus a label.
        _a : Any = [mem.copy() for i in range(4 )]
        _a : Union[str, Any] = VGroup(*_a ).arrange(_a ,buff=0 )
        _a : Optional[int] = Text('GPU' ,font_size=24 )
        _a : Optional[Any] = Group(_a ,_a ).arrange(_a ,buff=0.5 ,aligned_edge=_a )
        gpu.move_to([-1, -1, 0] )
        self.add(_a )
        # Model block: six cells representing the model's layers.
        _a : Any = [mem.copy() for i in range(6 )]
        _a : Dict = VGroup(*_a ).arrange(_a ,buff=0 )
        _a : int = Text('Model' ,font_size=24 )
        _a : Union[str, Any] = Group(_a ,_a ).arrange(_a ,buff=0.5 ,aligned_edge=_a )
        model.move_to([3, -1.0, 0] )
        self.add(_a )
        # Fill targets: one highlight per model layer and per CPU slot.
        _a : List[str] = []
        _a : Dict = []
        for i, rect in enumerate(_a ):
            _a : List[str] = fill.copy().set_fill(_a ,opacity=0.8 )
            target.move_to(_a )
            model_arr.append(_a )
            _a : str = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(_a ,opacity=0.8 )
            cpu_target.move_to(cpu_left_col_base[i] )
            model_cpu_arr.append(_a )
        self.add(*_a ,*_a )
        # Disk block: two rows of six meta cells plus a label.
        _a : Optional[int] = [meta_mem.copy() for i in range(6 )]
        _a : Optional[Any] = [meta_mem.copy() for i in range(6 )]
        _a : Optional[int] = VGroup(*_a ).arrange(_a ,buff=0 )
        _a : int = VGroup(*_a ).arrange(_a ,buff=0 )
        _a : Optional[int] = VGroup(_a ,_a ).arrange(_a ,buff=0 )
        _a : int = Text('Disk' ,font_size=24 )
        _a : Dict = Group(_a ,_a ).arrange(_a ,buff=0.5 ,aligned_edge=_a )
        disk.move_to([-4, -1.25, 0] )
        self.add(_a ,_a )
        # Legend in the top-left corner.
        _a : str = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        _a : str = MarkupText(
            F"""<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model""" ,font_size=18 ,)
        key_text.move_to([-5, 2.4, 0] )
        self.add(_a ,_a )
        _a : Optional[Any] = MarkupText(
            F"""<span fgcolor=\'{BLUE}\'>●</span> Checkpoint""" ,font_size=18 ,)
        blue_text.next_to(_a ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
        self.add(_a )
        # Narration step 1: introduce the input.
        _a : Dict = MarkupText(
            F"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" ,font_size=24 ,)
        step_a.move_to([2, 2, 0] )
        self.play(Write(_a ) )
        _a : Union[str, Any] = Square(0.3 )
        input.set_fill(_a ,opacity=1.0 )
        input.set_stroke(width=0.0 )
        input.next_to(model_base[0] ,_a ,buff=0.5 )
        self.play(Write(_a ) )
        input.generate_target()
        input.target.next_to(model_arr[0] ,direction=_a ,buff=0.02 )
        self.play(MoveToTarget(_a ) )
        self.play(FadeOut(_a ) )
        # Narration step 2: explain the hook-driven CPU<->GPU weight movement.
        _a : str = Arrow(start=_a ,end=_a ,color=_a ,buff=0.5 )
        a.next_to(model_arr[0].get_left() ,_a ,buff=0.2 )
        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0] )
        _a : Dict = MarkupText(
            F"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" ,font_size=24 ,)
        step_a.move_to([2, 2, 0] )
        self.play(Write(_a ,run_time=3 ) )
        _a : List[Any] = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
        self.play(
            Write(_a ) ,Circumscribe(model_arr[0] ,color=_a ,**_a ) ,Circumscribe(model_cpu_arr[0] ,color=_a ,**_a ) ,Circumscribe(gpu_rect[0] ,color=_a ,**_a ) ,)
        self.play(MoveToTarget(model_cpu_arr[0] ) )
        _a : List[str] = a.copy()
        # Animate the input crossing each of the six layers, swapping the
        # active layer's weights onto the GPU and back to the CPU afterwards.
        for i in range(6 ):
            a_c.next_to(model_arr[i].get_right() + 0.02 ,_a ,buff=0.2 )
            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02 )
            _a : str = AnimationGroup(
                FadeOut(_a ,run_time=0.5 ) ,MoveToTarget(_a ,run_time=0.5 ) ,FadeIn(_a ,run_time=0.5 ) ,lag_ratio=0.2 )
            self.play(_a )
            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
                if i >= 1:
                    _a : str = 0.7
                self.play(
                    Circumscribe(model_arr[i] ,**_a ) ,Circumscribe(cpu_left_col_base[i] ,**_a ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=_a ,**_a ) ,Circumscribe(gpu_rect[0] ,color=_a ,**_a ) ,Circumscribe(model_arr[i + 1] ,color=_a ,**_a ) ,)
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
            else:
                # Last layer: park the weights and move the input past the model.
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
                self.play(
                    Circumscribe(model_arr[-1] ,color=_a ,**_a ) ,Circumscribe(cpu_left_col_base[-1] ,color=_a ,**_a ) ,Circumscribe(gpu_rect[0] ,color=_a ,**_a ) ,)
                self.play(MoveToTarget(model_cpu_arr[i] ) )
        # Wrap up: fade the arrows and announce completion.
        _a : Optional[Any] = a_c
        _a : Dict = a_c.copy()
        input.generate_target()
        input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
        self.play(
            FadeOut(_a ) ,FadeOut(_a ,run_time=0.5 ) ,)
        _a : int = MarkupText(F"""Inference on a model too large for GPU memory\nis successfully completed.""" ,font_size=24 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(_a ,run_time=3 ) ,MoveToTarget(_a ) )
        self.wait()
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class A_ ( __UpperCamelCase , __UpperCamelCase ):
    """Variance-preserving score-based SDE scheduler (ScoreSdeVp-style).

    ``step_pred`` performs one Euler-Maruyama step of the reverse VP-SDE
    given a model score, a sample ``x`` and a continuous time ``t``.
    """

    __snake_case = 1

    # NOTE(review): the obfuscated original named every __init__ parameter
    # `a` (duplicate arguments are a SyntaxError), bound the state attributes
    # to throwaway locals, and gave both methods the same name so the second
    # shadowed the first. Names restored from the diffusers ScoreSdeVp
    # scheduler layout -- confirm against the upstream source.
    @register_to_config
    def __init__( self , num_train_timesteps=2000 , beta_min=0.1 , beta_max=20 , sampling_eps=1e-3 ):
        # @register_to_config stores the arguments on self.config; only the
        # mutable scheduling state is initialised here.
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps( self , num_inference_steps , device: Union[str, torch.device] = None ):
        """Create the descending continuous timestep grid on [sampling_eps, 1]."""
        self.timesteps = torch.linspace(1 , self.config.sampling_eps , num_inference_steps , device=device )

    def step_pred( self , score , x , t , generator=None ):
        """One reverse-SDE Euler-Maruyama step; returns (x, x_mean) where
        ``x_mean`` is the noise-free drift update and ``x`` adds the diffusion noise."""
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.2_5 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        std = std.flatten()
        # Broadcast std up to the score's rank before dividing.
        while len(std.shape ) < len(score.shape ):
            std = std.unsqueeze(-1 )
        score = -score / std
        # compute
        dt = -1.0 / len(self.timesteps )
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            beta_t = beta_t.unsqueeze(-1 )
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t )
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape , layout=x.layout , generator=generator , device=x.device , dtype=x.dtype )
        x = x_mean + diffusion * math.sqrt(-dt ) * noise
        return x, x_mean

    def __len__( self ):
        # Length of the training discretisation, from the registered config.
        return self.config.num_train_timesteps
| 669 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
# Lazy import structure for the Audio Spectrogram Transformer (AST) subpackage.
# NOTE(review): restored -- the obfuscated original bound every piece to
# `_lowerCAmelCase`, so `_import_structure` was undefined at the _LazyModule
# call and the lazy module was never installed into sys.modules.
_import_structure = {
    'configuration_audio_spectrogram_transformer': [
        'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'ASTConfig',
    ]
}

# Model classes are only importable when torch is present.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_audio_spectrogram_transformer'] = [
        'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ASTForAudioClassification',
        'ASTModel',
        'ASTPreTrainedModel',
    ]

# The feature extractor additionally needs the speech dependencies.
try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_audio_spectrogram_transformer'] = ['ASTFeatureExtractor']

if TYPE_CHECKING:
    # Eager imports for static type checkers only.
    from .configuration_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ASTConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_audio_spectrogram_transformer import (
            AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ASTForAudioClassification,
            ASTModel,
            ASTPreTrainedModel,
        )

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 506 |
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
    """Return the first ``n`` Hamming numbers (2-3-5 smooth numbers), ascending.

    Raises ValueError for non-positive input.
    """
    # NOTE(review): local bindings restored -- the obfuscated original
    # collapsed them, leaving hamming_list/i/j/k/index undefined (NameError).
    n_element = int(SCREAMING_SNAKE_CASE__ )
    if n_element < 1:
        my_error = ValueError('a should be a positive number' )
        raise my_error
    hamming_list = [1]
    # i, j, k index the smallest list entries whose 2x / 3x / 5x multiple
    # still exceeds the current maximum.
    (i, j, k) = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        # The next Hamming number is the smallest candidate multiple.
        hamming_list.append(
            min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
        index += 1
    return hamming_list
if __name__ == "__main__":
    # NOTE(review): bindings restored -- the original assigned the prompt to a
    # throwaway name and then called the undefined names `hamming` and `n`.
    n = input('Enter the last number (nth term) of the Hamming Number Series: ')
    print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
    hamming_numbers = UpperCamelCase__(int(n))
    print('-----------------------------------------------------')
    print(F"""The list with nth numbers is: {hamming_numbers}""")
    print('-----------------------------------------------------')
| 669 | 0 |
"""simple docstring"""
import pprint
import requests
# Base URL of the ZenQuotes REST API.
_UpperCamelCase = 'https://zenquotes.io/api'


def lowerCAmelCase_ ( ):
    """Fetch today's quote from the ZenQuotes API and return the parsed JSON."""
    # NOTE(review): the original read an undefined API_ENDPOINT_URL; the
    # module constant is bound to `_UpperCamelCase` above.
    return requests.get(_UpperCamelCase + '''/today''' ).json()


def lowerCAmelCase_ ( ):  # noqa: F811 -- obfuscated name collision shadows the helper above
    """Fetch a random quote from the ZenQuotes API and return the parsed JSON."""
    return requests.get(_UpperCamelCase + '''/random''' ).json()


if __name__ == "__main__":
    # Only the last binding of the collided name survives: the /random helper.
    # NOTE(review): fixed -- the original called the undefined `random_quotes`
    # and printed the undefined `response`.
    response = lowerCAmelCase_()
    pprint.pprint(response)
| 179 |
import unittest
from knapsack import greedy_knapsack as kp
class A_ ( unittest.TestCase ):
    # Test suite for the greedy-knapsack helper module `kp`.
    # NOTE(review): every test method below is named `_snake_case`, so only the
    # last definition survives class creation, and each assertRaisesRegex call
    # passes the undefined name `a` where the (exception, regex, callable,
    # *args) arguments belong -- none of these tests can run as written.
    # Reconstruct the argument lists from the upstream greedy-knapsack suite
    # before trusting them.
    def _snake_case ( self: List[Any] ):
        # Happy path: expected profit on a known instance (values bound to a
        # throwaway local by the obfuscation).
        __lowerCamelCase : str = [10, 20, 30, 40, 50, 60]
        __lowerCamelCase : List[str] = [2, 4, 6, 8, 10, 12]
        __lowerCamelCase : Tuple = 100
        self.assertEqual(kp.calc_profit(a , a , a ) , 210 )

    def _snake_case ( self: str ):
        # Expected failure: non-positive capacity.
        self.assertRaisesRegex(a , 'max_weight must greater than zero.' )

    def _snake_case ( self: List[str] ):
        # Expected failure: negative weight.
        self.assertRaisesRegex(a , 'Weight can not be negative.' )

    def _snake_case ( self: Dict ):
        # Expected failure: negative profit.
        self.assertRaisesRegex(a , 'Profit can not be negative.' )

    def _snake_case ( self: List[str] ):
        # Expected failure: zero capacity.
        self.assertRaisesRegex(a , 'max_weight must greater than zero.' )

    def _snake_case ( self: Any ):
        # Expected failure: mismatched profit/weight list lengths.
        self.assertRaisesRegex(
            a , 'The length of profit and weight must be same.' )


if __name__ == "__main__":
    unittest.main()
| 669 | 0 |
'''simple docstring'''
from __future__ import annotations
def snake_case_ ( __snake_case : list[float]) -> float:
    """Return the arithmetic mean of a non-empty sequence of numbers.

    Raises:
        ValueError: if the sequence is empty.
    """
    # NOTE(review): the original body read the undefined names `nums` and
    # `SCREAMING_SNAKE_CASE__`; rewired to the actual parameter. The bogus
    # `Any`/`Dict` annotations were also corrected.
    if not __snake_case:
        raise ValueError('''List is empty''')
    return sum(__snake_case) / len(__snake_case)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 274 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A_ :
    # Model tester for TFLayoutLMv3: builds tiny configs/inputs and runs
    # shape checks for the base model and each task head.
    # NOTE(review): the __init__ signature repeats the parameter name `a`
    # (duplicate arguments are a SyntaxError) and every value is bound to a
    # throwaway local instead of an attribute, so instances carry no state;
    # later methods read attributes that are never set. Names would need to be
    # restored from the upstream LayoutLMv3 tester before this can run.
    def __init__( self: Optional[int] , a: Union[str, Any] , a: Union[str, Any]=2 , a: str=3 , a: Any=4 , a: Union[str, Any]=2 , a: Tuple=7 , a: int=True , a: Tuple=True , a: List[str]=True , a: Union[str, Any]=True , a: str=99 , a: Tuple=36 , a: int=2 , a: Dict=4 , a: Union[str, Any]=37 , a: List[str]="gelu" , a: List[Any]=0.1 , a: Optional[int]=0.1 , a: Dict=512 , a: Union[str, Any]=16 , a: str=2 , a: int=0.0_2 , a: Optional[Any]=6 , a: Optional[int]=6 , a: Dict=3 , a: Optional[Any]=4 , a: Optional[Any]=None , a: Dict=1000 , ):
        __lowerCamelCase : List[str] = parent
        __lowerCamelCase : Optional[Any] = batch_size
        __lowerCamelCase : Optional[int] = num_channels
        __lowerCamelCase : str = image_size
        __lowerCamelCase : int = patch_size
        __lowerCamelCase : List[str] = is_training
        __lowerCamelCase : Dict = use_input_mask
        __lowerCamelCase : Any = use_token_type_ids
        __lowerCamelCase : List[str] = use_labels
        __lowerCamelCase : str = vocab_size
        __lowerCamelCase : List[Any] = hidden_size
        __lowerCamelCase : List[Any] = num_hidden_layers
        __lowerCamelCase : Any = num_attention_heads
        __lowerCamelCase : List[Any] = intermediate_size
        __lowerCamelCase : List[Any] = hidden_act
        __lowerCamelCase : Any = hidden_dropout_prob
        __lowerCamelCase : Optional[int] = attention_probs_dropout_prob
        __lowerCamelCase : Dict = max_position_embeddings
        __lowerCamelCase : Tuple = type_vocab_size
        __lowerCamelCase : int = type_sequence_label_size
        __lowerCamelCase : List[str] = initializer_range
        __lowerCamelCase : List[str] = coordinate_size
        __lowerCamelCase : int = shape_size
        __lowerCamelCase : Union[str, Any] = num_labels
        __lowerCamelCase : int = num_choices
        __lowerCamelCase : int = scope
        __lowerCamelCase : Any = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        __lowerCamelCase : Any = text_seq_length
        __lowerCamelCase : Optional[Any] = (image_size // patch_size) ** 2 + 1
        __lowerCamelCase : Any = self.text_seq_length + self.image_seq_length

    # Build a random (config, input_ids, bbox, pixel_values, ...) tuple.
    def _snake_case ( self: List[str] ):
        __lowerCamelCase : Any = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        __lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        __lowerCamelCase : int = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                # Swap coordinates so x0 <= x1 and y0 <= y1 for every box.
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    __lowerCamelCase : List[str] = bbox[i, j, 3]
                    __lowerCamelCase : str = bbox[i, j, 1]
                    __lowerCamelCase : Dict = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    __lowerCamelCase : Tuple = bbox[i, j, 2]
                    __lowerCamelCase : Any = bbox[i, j, 0]
                    __lowerCamelCase : List[str] = tmp_coordinate
        __lowerCamelCase : str = tf.constant(a )
        __lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __lowerCamelCase : Any = None
        if self.use_input_mask:
            __lowerCamelCase : int = random_attention_mask([self.batch_size, self.text_seq_length] )
        __lowerCamelCase : Tuple = None
        if self.use_token_type_ids:
            __lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        __lowerCamelCase : Dict = None
        __lowerCamelCase : Union[str, Any] = None
        if self.use_labels:
            __lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __lowerCamelCase : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        __lowerCamelCase : Dict = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    # Base model: text+image, text-only and image-only forward passes.
    def _snake_case ( self: Tuple , a: List[Any] , a: Any , a: List[str] , a: Dict , a: Optional[Any] , a: Dict ):
        __lowerCamelCase : Optional[Any] = TFLayoutLMvaModel(config=a )
        # text + image
        __lowerCamelCase : Optional[Any] = model(a , pixel_values=a , training=a )
        __lowerCamelCase : int = model(
            a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , training=a , )
        __lowerCamelCase : List[Any] = model(a , bbox=a , pixel_values=a , training=a )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        __lowerCamelCase : List[Any] = model(a , training=a )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        __lowerCamelCase : Optional[Any] = model({'pixel_values': pixel_values} , training=a )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )

    # Sequence-classification head: logits shape (batch, num_labels).
    def _snake_case ( self: Dict , a: Dict , a: Optional[Any] , a: int , a: Optional[int] , a: List[str] , a: List[str] , a: List[str] ):
        __lowerCamelCase : List[str] = self.num_labels
        __lowerCamelCase : str = TFLayoutLMvaForSequenceClassification(config=a )
        __lowerCamelCase : int = model(
            a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , labels=a , training=a , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    # Token-classification head: logits shape (batch, text_seq_length, num_labels).
    def _snake_case ( self: Optional[int] , a: Union[str, Any] , a: Union[str, Any] , a: Dict , a: Optional[Any] , a: Tuple , a: Optional[Any] , a: List[Any] ):
        __lowerCamelCase : Union[str, Any] = self.num_labels
        __lowerCamelCase : Any = TFLayoutLMvaForTokenClassification(config=a )
        __lowerCamelCase : Optional[Any] = model(
            a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , labels=a , training=a , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )

    # Question-answering head: start/end logits shape (batch, seq_length).
    def _snake_case ( self: Dict , a: Optional[Any] , a: str , a: Dict , a: Union[str, Any] , a: List[Any] , a: Optional[int] , a: List[str] ):
        __lowerCamelCase : List[Any] = 2
        __lowerCamelCase : Any = TFLayoutLMvaForQuestionAnswering(config=a )
        __lowerCamelCase : Any = model(
            a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , start_positions=a , end_positions=a , training=a , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    # Repackage the prepared tuple as (config, inputs_dict) for common tests.
    # NOTE(review): the 8-way unpacking below was collapsed by the obfuscation,
    # so the names read in `inputs_dict` are undefined.
    def _snake_case ( self: List[Any] ):
        __lowerCamelCase : str = self.prepare_config_and_inputs()
        ((__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase)) : List[Any] = config_and_inputs
        __lowerCamelCase : Tuple = {
            'input_ids': input_ids,
            'bbox': bbox,
            'pixel_values': pixel_values,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_tf
class A_(__UpperCamelCase, __UpperCamelCase, unittest.TestCase):
    """Common-mixin model tests for the TF LayoutLMv3 family.

    The obfuscated original assigned every class attribute to the same name
    (`__snake_case`), so the mixin-required attributes (`all_model_classes`,
    `pipeline_model_mapping`, ...) never existed; several methods declared
    duplicate parameters named `a` (a SyntaxError) and read unbound locals.
    Identifiers are restored below.
    """

    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # Generic pipeline tests are skipped for this model family.
        return True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Deep-copy `inputs_dict` and, if `return_labels`, add dummy label tensors
        whose shape matches what `model_class` expects.

        NOTE(review): assumes the TF_MODEL_FOR_*_MAPPING constants and
        `get_values` are imported at module top (out of view) — confirm.
        """
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            # Tile every tensor across the num_choices axis for multiple choice.
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        # NOTE(review): assumes LayoutLMvaConfig is imported at module top — confirm.
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_loss_computation(self):
        """Verify loss is computed identically from kwargs, dict and tuple inputs."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, 'hf_compute_loss', None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop('input_ids')
                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop('input_ids')
                if "labels" in prepared_for_class:
                    labels = prepared_for_class['labels'].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        # Mask the first example entirely; loss must stay finite.
                        labels[0] = -100
                        prepared_for_class['labels'] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())
                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: 'input_ids'}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input)
                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

    def test_model(self):
        (
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_model_various_embeddings(self):
        (
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _,
        ) = self.model_tester.prepare_config_and_inputs()
        for embedding_type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = embedding_type
            self.model_tester.create_and_check_model(
                config, input_ids, bbox, pixel_values, token_type_ids, input_mask
            )

    def test_for_sequence_classification(self):
        (
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    def test_for_token_classification(self):
        (
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )

    def test_for_question_answering(self):
        (
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def UpperCamelCase__():
    """Load the COCO cats fixture image used by the integration tests below."""
    return Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
@require_tf
class A_(unittest.TestCase):
    """Slow integration test for the base TF LayoutLMv3 checkpoint.

    The original named both methods `_snake_case` while the test body read
    `self.default_image_processor`, and passed the undefined name `a` as
    `apply_ocr`; both are fixed here.
    """

    @cached_property
    def default_image_processor(self):
        # OCR is disabled because the test supplies its own words/boxes.
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        """Run the base model on a fixture image and check the hidden states."""
        model = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base')
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='tf').pixel_values
        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 669 | 0 |
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
A = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
A = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
A = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
A = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
A = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __snake_case(datasets.Metric):
    """pass@k metric for code generation (HumanEval harness).

    Executes each generated candidate against its unit test in a thread pool and
    aggregates per-problem pass counts into the unbiased pass@k estimate.  The
    original's locals were obfuscated away (e.g. `total, correct` collapsed into
    one name), leaving the metric unable to run; identifiers are restored, and
    the `datasets.Metric` hook names `_info`/`_compute` are reinstated.
    """

    def _info(self):
        """Declare the metric's schema and provenance for `datasets`."""
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('string')),
                    'references': datasets.Value('string'),
                }
            ),
            homepage='https://github.com/openai/human-eval',
            codebase_urls=['https://github.com/openai/human-eval'],
            reference_urls=['https://github.com/openai/human-eval'],
            license=_LICENSE,
        )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Return (pass@k dict, per-task granular results).

        WARNING: executes untrusted model-generated code; deliberately gated
        behind the HF_ALLOW_CODE_EVAL=1 environment variable.
        """
        if os.getenv('HF_ALLOW_CODE_EVAL', 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError('This metric is currently not supported on Windows.')

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()  # number of candidates submitted per task
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + '\n' + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result['completion_id'], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]['passed'] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f'pass@{k}': estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def UpperCAmelCase(num_samples, num_correct, k):
    """Estimate pass@k of each problem; returns a numpy array of estimates.

    num_samples: total samples per problem (int applies to every problem).
    num_correct: per-problem counts of correct samples.
    k: the k of pass@k.

    The original declared three parameters with the same name (a SyntaxError)
    and referenced an undefined identifier throughout; restored to the
    standard HumanEval estimator.
    """

    def estimator(n: int, c: int, k: int) -> float:
        """Numerically stable 1 - C(n-c, k) / C(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])


# Backward-compatible alias: the metric class above calls it under this name.
estimate_pass_at_k = UpperCAmelCase
| 320 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_(__UpperCamelCase, unittest.TestCase):
    """Tokenization tests for CLIP's slow (Python) and fast (Rust) tokenizers.

    The obfuscated original collapsed the mixin attributes into one name and
    every test method into `_snake_case`, so none of the tests could be
    collected; locals such as the vocab/merges lists were also unbound.
    Identifiers are restored below.
    """

    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text

    def test_full_tokenizer(self):
        """BPE splitting and token-to-id conversion on the toy vocabulary."""
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @require_ftfy
    def test_check_encoding_slow_fast(self):
        """Slow and fast tokenizers must agree on tricky unicode inputs."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
                tokens_s = tokenizer_s.tokenize(text)
                tokens_r = tokenizer_r.tokenize(text)
                self.assertListEqual(tokens_s, tokens_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = 'xa\u0303y' + ' ' + 'x\xe3y'
                tokens_s = tokenizer_s.tokenize(text)
                tokens_r = tokenizer_r.tokenize(text)
                self.assertListEqual(tokens_s, tokens_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    '\u0009',  # (horizontal tab, '\t')
                    '\u000B',  # (vertical tab)
                    '\u000C',  # (form feed)
                    '\u0020',  # (space, ' ')
                    '\u200E',  # (left-to-right mark)
                    '\u200F',  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    tokens_s = tokenizer_s.tokenize(unicode_seq)
                    tokens_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(tokens_s, tokens_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    '\u000A',  # (line feed, '\n')
                    '\r\n',  # (carriage return and line feed, '\r\n')
                    '\u000D',  # (carriage return, '\r')
                    '\r',  # (carriage return, '\r')
                    '\u000D',  # (carriage return, '\r')
                    '\u2028',  # (line separator)
                    '\u2029',  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for "\u0085" (next line): the slow version using ftfy
                # transforms it into "…" ("\u2026") while the fast version transforms it into a space
                # (and thus into an empty list) — hence its exclusion above.
                for unicode_seq in line_break_unicodes:
                    tokens_s = tokenizer_s.tokenize(unicode_seq)
                    tokens_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(tokens_s, tokens_r)

    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
                text_of_1_token = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`
                text = F'{text_of_1_token} {text_of_1_token}'

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = F' {text}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

    def test_log_warning(self):
        # Breaking change introduced in transformers v4.17.0: loading a previous
        # version of the tokenizer must raise an explicit error.
        with self.assertRaises(BaseException) as context:
            self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer')

        self.assertTrue(
            context.exception.args[0].startswith(
                'The `backend_tokenizer` provided does not match the expected format.'))

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
| 669 | 0 |
from __future__ import annotations
from typing import Generic, TypeVar
a__ = TypeVar('''T''')
# The generic classes below annotate with `T`; expose the type variable under
# that name too (the obfuscated rename left `T` undefined at class creation).
T = a__
class DisjointSetTreeNode(Generic[T]):
    """A node in a disjoint-set forest: payload, parent link and union rank.

    Restored name: the original file defined three classes under one name and
    referenced `DisjointSetTreeNode` elsewhere; its `__init__` also assigned to
    throwaway locals instead of instance attributes.
    """

    def __init__(self, data: T) -> None:
        self.data = data
        # A fresh node is the root of its own singleton tree.
        self.parent = self
        self.rank = 0
class DisjointSetTree(Generic[T]):
    """Disjoint-set (union-find) forest with path compression and union by rank.

    Restored name and method names (`make_set`/`find_set`/`union`) — the graph
    class below calls them, and the original's rank/parent updates were lost to
    obfuscated local names.
    """

    def __init__(self) -> None:
        # Maps each stored item to its tree node.
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        """Create a new singleton set containing `data`."""
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        """Return the root of the set containing `data`, compressing the path."""
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        """Attach the lower-rank root under the higher-rank one (union by rank)."""
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        """Merge the sets containing `data1` and `data2`."""
        self.link(self.find_set(data1), self.find_set(data2))
class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph with Kruskal minimum-spanning-tree extraction.

    Restored name: the original's three classes shared one identifier, and its
    edge-sort lambda referenced an undefined variable.
    """

    def __init__(self) -> None:
        # Adjacency map: node -> {neighbour: edge weight}.
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        """Add `node` to the graph if it is not already present."""
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        """Add an undirected weighted edge between `node1` and `node2`."""
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> "GraphUndirectedWeighted[T]":
        """Return the minimum spanning tree via Kruskal's algorithm."""
        # Collect each undirected edge exactly once.
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        # Sort ascending by weight (the original lambda read an undefined name).
        edges.sort(key=lambda edge: edge[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation: take the lightest edge that joins two components.
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
| 279 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
lowercase_ = False
try:
    lowercase_ = _is_package_available('google.colab')
except ModuleNotFoundError:
    pass
# The menu class below branches on `in_colab`; expose the flag under that name
# (the obfuscated rename left it undefined).
in_colab = lowercase_
@input.register
class A_:
    """An interactive keyboard-driven bullet menu for the terminal.

    The obfuscated original named every handler `_snake_case` while the class
    itself called `self.write_choice` / `self.print_choice` /
    `self.move_direction` / `self.handle_input`, and its key-registration
    decorator read an undefined name; the working method names are restored.
    """

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            # The arrow glyph does not render on classic Windows consoles.
            self.arrow_char = '*'
        else:
            self.arrow_char = '➔ '

    def write_choice(self, index, end: str = ""):
        """Write the choice text, colored green where the terminal supports it."""
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        """Print one row, prefixing the arrow if it is the current selection."""
        if index == self.position:
            forceWrite(F' {self.arrow_char} ')
            self.write_choice(index)
        else:
            forceWrite(F' {self.choices[index]}')
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        """Move the selection up or down, redrawing the two affected rows."""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP['up'])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP['down'])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP['newline'])
    def select(self):
        move_cursor(len(self.choices) - self.position, 'DOWN')
        return self.position

    @input.mark(KEYMAP['interrupt'])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, 'DOWN')
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        """Jump directly to a row when a digit key is pressed."""
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        """Draw the menu, process keys (or stdin on Colab) and return the choice."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, '\n')
            if in_colab:
                forceWrite('Please input a choice index (starting from 0), and press enter', '\n')
            else:
                forceWrite('Please select a choice using the arrow or number keys, and selecting with enter', '\n')
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite('\n')
        move_cursor(len(self.choices) - self.position, 'UP')
        with cursor.hide():
            while True:
                if in_colab:
                    # Colab cannot intercept raw key presses; fall back to stdin.
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, 'UP')
                        clear_line()
                    self.write_choice(choice, '\n')
                    return choice
| 669 | 0 |
import csv
import tweepy
# Twitter API credentials
UpperCAmelCase_ = ""
UpperCAmelCase_ = ""
UpperCAmelCase_ = ""
UpperCAmelCase_ = ""
def A__(screen_name: str) -> None:
    """Download a user's recent tweets and write them to new_<name>_tweets.csv.

    The Twitter API returns at most 200 tweets per page, so pages are fetched
    backwards with `max_id` until an empty page is returned.  The original body
    referenced an undefined obfuscated name for every argument; restored here.
    """
    # authorize and construct the API client
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one (guard against empty timelines,
    # which would previously raise an IndexError)
    oldest = alltweets[-1].id - 1 if alltweets else None

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(['id', 'created_at', 'text'])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("FirePing32") | 32 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( __UpperCamelCase ):
'''simple docstring'''
__snake_case = (CMStochasticIterativeScheduler,)
__snake_case = 10
def get_scheduler_config(self, **kwargs):
    """Return the default CMStochasticIterativeScheduler config, with any
    keyword overrides applied.

    Restored name (sibling tests call `self.get_scheduler_config()`) and the
    `config` local that the original never bound.
    """
    config = {
        'num_train_timesteps': 201,
        'sigma_min': 0.002,
        'sigma_max': 80.0,
    }
    config.update(**kwargs)
    return config
def test_step_shape(self):
    """`step()` must preserve the sample shape at different timesteps."""
    num_inference_steps = 10
    scheduler_config = self.get_scheduler_config()
    scheduler = self.scheduler_classes[0](**scheduler_config)

    scheduler.set_timesteps(num_inference_steps)

    timestep_0 = scheduler.timesteps[0]
    timestep_1 = scheduler.timesteps[1]

    sample = self.dummy_sample
    residual = 0.1 * sample

    output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
    output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

    self.assertEqual(output_0.shape, sample.shape)
    self.assertEqual(output_0.shape, output_1.shape)
def test_timesteps(self):
    """Exercise check_over_configs across several training-timestep counts."""
    for timesteps in [10, 50, 100, 1000]:
        self.check_over_configs(num_train_timesteps=timesteps)
def test_clip_denoised(self):
    """Exercise check_over_configs with clipping both enabled and disabled."""
    for clip_denoised in [True, False]:
        self.check_over_configs(clip_denoised=clip_denoised)
def _snake_case ( self: Tuple ):
__lowerCamelCase : Tuple = self.scheduler_classes[0]
__lowerCamelCase : Tuple = self.get_scheduler_config()
__lowerCamelCase : Tuple = scheduler_class(**a )
__lowerCamelCase : int = 1
scheduler.set_timesteps(a )
__lowerCamelCase : Optional[int] = scheduler.timesteps
__lowerCamelCase : List[str] = torch.manual_seed(0 )
__lowerCamelCase : Union[str, Any] = self.dummy_model()
__lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(a ):
# 1. scale model input
__lowerCamelCase : List[str] = scheduler.scale_model_input(a , a )
# 2. predict noise residual
__lowerCamelCase : Optional[int] = model(a , a )
# 3. predict previous sample x_t-1
__lowerCamelCase : str = scheduler.step(a , a , a , generator=a ).prev_sample
__lowerCamelCase : str = pred_prev_sample
__lowerCamelCase : List[str] = torch.sum(torch.abs(a ) )
__lowerCamelCase : str = torch.mean(torch.abs(a ) )
assert abs(result_sum.item() - 1_9_2.7_6_1_4 ) < 1e-2
assert abs(result_mean.item() - 0.2_5_1_0 ) < 1e-3
def _snake_case ( self: Optional[Any] ):
__lowerCamelCase : Any = self.scheduler_classes[0]
__lowerCamelCase : Optional[Any] = self.get_scheduler_config()
__lowerCamelCase : int = scheduler_class(**a )
__lowerCamelCase : List[Any] = [106, 0]
scheduler.set_timesteps(timesteps=a )
__lowerCamelCase : Dict = scheduler.timesteps
__lowerCamelCase : int = torch.manual_seed(0 )
__lowerCamelCase : Any = self.dummy_model()
__lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
__lowerCamelCase : Tuple = scheduler.scale_model_input(a , a )
# 2. predict noise residual
__lowerCamelCase : Tuple = model(a , a )
# 3. predict previous sample x_t-1
__lowerCamelCase : Any = scheduler.step(a , a , a , generator=a ).prev_sample
__lowerCamelCase : Any = pred_prev_sample
__lowerCamelCase : Dict = torch.sum(torch.abs(a ) )
__lowerCamelCase : Optional[Any] = torch.mean(torch.abs(a ) )
assert abs(result_sum.item() - 3_4_7.6_3_5_7 ) < 1e-2
assert abs(result_mean.item() - 0.4_5_2_7 ) < 1e-3
def _snake_case ( self: Tuple ):
__lowerCamelCase : Optional[int] = self.scheduler_classes[0]
__lowerCamelCase : int = self.get_scheduler_config()
__lowerCamelCase : List[Any] = scheduler_class(**a )
__lowerCamelCase : Optional[Any] = [39, 30, 12, 15, 0]
with self.assertRaises(a , msg='`timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=a )
def _snake_case ( self: int ):
__lowerCamelCase : Any = self.scheduler_classes[0]
__lowerCamelCase : Union[str, Any] = self.get_scheduler_config()
__lowerCamelCase : Union[str, Any] = scheduler_class(**a )
__lowerCamelCase : Optional[int] = [39, 30, 12, 1, 0]
__lowerCamelCase : List[Any] = len(a )
with self.assertRaises(a , msg='Can only pass one of `num_inference_steps` or `timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=a , timesteps=a )
def _snake_case ( self: Optional[Any] ):
__lowerCamelCase : Optional[int] = self.scheduler_classes[0]
__lowerCamelCase : Dict = self.get_scheduler_config()
__lowerCamelCase : Union[str, Any] = scheduler_class(**a )
__lowerCamelCase : Optional[int] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
a , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ):
scheduler.set_timesteps(timesteps=a )
| 669 | 0 |
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
__lowerCamelCase = logging.get_logger(__name__)
# Map onnxruntime tensor-type strings to the corresponding numpy dtypes.
# Fixed: the dict was bound to `__lowerCamelCase` (shadowing the module logger)
# and every value referenced a nonexistent numpy attribute (np.inta, np.intaa, ...).
ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    """Thin wrapper around an `onnxruntime.InferenceSession` with HF-style save/load helpers.

    Restored from a scrambled version: the class was named `_snake_case` while its own
    body called `OnnxRuntimeModel.load_model`; every method reused one parameter name
    (`snake_case`), which is a SyntaxError; and `__init__` never actually set the
    attributes (`self.model`, ...) that the other methods read.
    """

    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        # NOTE(review): default restored to ONNX_WEIGHTS_NAME to match _from_pretrained,
        # which only sets `latest_model_name` on the hub-download path — confirm.
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        """Run the session; every keyword value is converted to a numpy array."""
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        # None => return all model outputs.
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        """Create an `ort.InferenceSession`, defaulting to the CPU provider."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        """Copy the serialized model (and any external weights) into `save_directory`."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        """Public save entry point; refuses a file path and creates the directory."""
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str, None]] = None,
        revision: Optional[Union[str, None]] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        """Load from a local directory or download from the Hugging Face Hub."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        """Public load entry point; supports the `repo_id@revision` syntax."""
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
| 608 |
"""Download the og:image of a web page and save it with a timestamped filename."""
from datetime import datetime

import requests
from bs4 import BeautifulSoup  # fixed: was `from bsa import ...` (nonexistent package)

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
| 669 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class snake_case(unittest.TestCase):
    """Fast CPU tests for the unconditional `LDMPipeline`.

    Restored property/local names: values were assigned to `lowercase__` but read
    back as `model`, `ldm`, `image`, ..., and the properties must be named
    `dummy_uncond_unet` / `dummy_vq_model` because the test reads them by those names.
    """

    @property
    def dummy_uncond_unet(self):
        """A tiny, deterministically seeded UNet2DModel."""
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        """A tiny, deterministically seeded VQModel."""
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        """A tiny CLIP text encoder (unused by the test below, kept for parity)."""
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        """Pipeline output must match the golden slice, both as dict and as tuple."""
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        # MPS needs a looser tolerance.
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    """Slow integration test for `LDMPipeline` against the CompVis celebahq checkpoint.

    Renamed from `snake_case`, which collided with (and silently shadowed) the fast
    test class of the same name defined above; locals restored from `lowercase__`.
    """

    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        # MPS needs a looser tolerance.
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 267 |
"""Lint repository file paths: flag uppercase, spaces, hyphens, and top-level files.

Exits with a nonzero status (the number of offending files) if any check fails.
Fixed: every result list was bound to `lowercase_` but read back via its semantic name.
"""
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 669 | 0 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
# Substring replacements applied to checkpoint keys by `rename_keys` below.
# Fixed: was bound to `lowercase` while `rename_keys` reads MOE_LAYER_NAME_MAPPING.
MOE_LAYER_NAME_MAPPING = {
    "/attention/": "/0/SelfAttention/",
    "/self_attention/": "/0/SelfAttention/",
    "/encoder_decoder_attention/": "/1/EncDecAttention/",
    "value": "v",
    "query": "q",
    "key": "k",
    "out": "o",
    "pre_self_attention_layer_norm": "0/layer_norm",
    "pre_cross_attention_layer_norm": "1/layer_norm",
    "pre_attention_layer_norm": "0/layer_norm",  # previously 1, but seems wrong
    "token_embedder": "shared",
    "encoder_norm": "final_layer_norm",
    "decoder_norm": "final_layer_norm",
    "relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
    "router/router_weights/w/": "router/classifier/",
    "roer/roer_weights/w/": "router/classifier/",
    "logits_dense": "lm_head",
}
def rename_keys(s_dict):
    """Rewrite flattened T5X checkpoint keys into SwitchTransformers parameter names.

    Mutates `s_dict` in place and also returns it. Restored from a scrambled version:
    the function is called as `rename_keys` by the converter below, and the loop's
    assignment targets (`new_key`, `s_dict[new_key]`, ...) had been lost.
    """
    # 1. layers_{x} -> block/{x}/layer, and number the sublayers per encoder/decoder.
    keys = list(s_dict.keys())
    for key in keys:
        layer_pattern = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_pattern, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        enc_dec_pattern = r"(encoder|decoder)\/"
        if re.match(enc_dec_pattern, key):
            groups = re.match(enc_dec_pattern, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    # Relative attention biases are stored transposed relative to PyTorch.
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer: split the stacked expert weights
    # into one entry per expert.
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weihts = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weihts[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
            s_dict.pop(key)

    return s_dict
# Map gin-config parameter names to SwitchTransformersConfig attribute names.
# Fixed: was bound to `lowercase` while `convert_gin_to_config` reads GIN_TO_CONFIG_MAPPING.
GIN_TO_CONFIG_MAPPING = {
    "NUM_ENCODER_LAYERS": "num_layers",
    "NUM_DECODER_LAYERS": "num_decoder_layers",
    "NUM_HEADS": "num_heads",
    "HEAD_DIM": "d_kv",
    "EMBED_DIM": "d_model",
    "MLP_DIM": "d_ff",
    "NUM_SELECTED_EXPERTS": "num_selected_experts",
    "NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
    "NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
    "dense.MlpBlock.activations": "feed_forward_proj",
}
def convert_gin_to_config(gin_file, num_experts):
    """Parse a `.gin` config file into a `SwitchTransformersConfig`.

    Only parameters listed in GIN_TO_CONFIG_MAPPING are picked up; numeric values
    are cast to int/float and the activation tuple is extracted separately.
    """
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            # Floats contain a '.', everything else is an int.
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    """Convert a T5X/Flax SwitchTransformers checkpoint to a PyTorch checkpoint.

    Either `gin_file` (parsed via `convert_gin_to_config`) or `config_file`
    (loaded via `from_pretrained`) must describe the architecture.
    """
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_tax_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    # Flatten, rename to HF conventions, then restore the nesting.
    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    # Fixed: parser/args were bound to `lowercase`, the converter call read a
    # nonexistent `args.switch_tax_checkpoint_path`, and trailing junk tokens
    # ("| 557 |") were removed.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
            " model architecture. If not provided, a `gin_file` has to be provided."
        ),
    )
    parser.add_argument(
        "--gin_file",
        default=None,
        type=str,
        required=False,
        help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
    )
    parser.add_argument(
        "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
    )
    parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
# Checkpoint name -> hosted config URL for the pretrained XLM-RoBERTa models.
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
    "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
    "xlm-roberta-large-finetuned-conll02-dutch": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll02-spanish": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-english": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-german": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
    ),
}
class XLMRobertaConfig(PretrainedConfig):
    """Configuration class for XLM-RoBERTa models.

    Restored from a scrambled version: the base was the undefined name
    `__UpperCamelCase` (PretrainedConfig per the file's import), every parameter
    was named `a` (duplicate parameter names are a SyntaxError), and the attribute
    assignments were bound to `__lowerCamelCase` instead of `self.<attr>`.
    """

    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaOnnxConfig(OnnxConfig):
    """ONNX export configuration for XLM-RoBERTa.

    Renamed from a second `A_` (which shadowed the config class above); the base is
    `OnnxConfig` per the file's import, the property must be named `inputs` to
    satisfy the OnnxConfig interface, and the local axis dict was bound to an
    obfuscated name while being read as `dynamic_axis`.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 669 | 0 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_snake_case : Dict = 'platform'
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the default input dict for Blenderbot tests, deriving missing masks.

    Attention masks mark non-pad positions; head masks default to all-ones.
    NOTE(review): the returned "decoder_attention_mask" is the *encoder* attention
    mask, mirroring the scrambled original — the computed decoder mask is unused.
    """
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotModelTester:
    """Shared fixture/checker for the Flax Blenderbot tests.

    Restored names: the class is instantiated as `FlaxBlenderbotModelTester` by the
    test class below, its methods are called as `prepare_config_and_inputs`,
    `prepare_config_and_inputs_for_common`, `check_use_cache_forward`, and
    `check_use_cache_forward_with_attn_mask`, and all locals were bound to
    `__lowerCAmelCase` while being read by semantic names.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        """Build a tiny BlenderbotConfig plus matching random inputs."""
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        # Force an EOS (id 2) as the final token of every sequence.
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            # NOTE(review): the scrambled source obscured this value; False matches
            # the cache-checking helpers below, which call init_cache explicitly.
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        """Cached incremental decoding must match the uncached forward pass."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        """Same as above, but with an explicit (padded) decoder attention mask."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        # Pad the mask out to the cache length with zeros.
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxBlenderbotHeadTests(unittest.TestCase):
    """Standalone shape/utility tests for the Flax Blenderbot LM head.

    Renamed from `_UpperCAmelCase` (which collided with other classes in the
    module); method names restored from `lowercase` duplicates so unittest can
    discover them, and locals restored from `__lowerCAmelCase`.
    """

    vocab_size = 99

    def _get_config_and_data(self):
        """Return (config, input_ids, batch_size) for a tiny Blenderbot model."""
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        """Logits must have shape (batch, seq_len, vocab_size)."""
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        """Encoder and decoder sequences of different lengths must work."""
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        """Shifting must preserve shape, consume one pad, and insert BOS (id 2)."""
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class _UpperCAmelCase ( __UpperCamelCase , unittest.TestCase , __UpperCamelCase ):
    """Flax Blenderbot model tests, wired into the shared Flax tester mixins.

    NOTE(review): the obfuscated original bound every class attribute to the
    same name `a_` (silently overwriting earlier values) and named every
    method `lowercase` (so only the last survived and `self.model_tester`
    was never set).  The attribute and helper names the mixins read are
    restored; the `test_*` names are reconstructed descriptions, the
    originals having been lost to the obfuscation.
    """

    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        # Shared tester providing configs/inputs for all model classes.
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        """Jitted and non-jitted encode produce outputs of identical shape."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest('JIT Enabled'):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest('JIT Disabled'):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        """Jitted and non-jitted decode produce outputs of identical shape."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict['input_ids'], inputs_dict['attention_mask'])
                prepared_inputs_dict = {
                    'decoder_input_ids': inputs_dict['decoder_input_ids'],
                    'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
                    'encoder_outputs': encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest('JIT Enabled'):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest('JIT Disabled'):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('facebook/blenderbot-400M-distill')
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != 'cpu', '3B test too slow on CPU.')
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        generate_kwargs = {'num_beams': 1, 'early_stopping': True, 'min_length': 15, 'max_length': 25}
        tokenizer_kwargs = {'skip_special_tokens': True, 'clean_up_tokenization_spaces': True}
        # NOTE(review): the garbled original passed an undefined flag here;
        # from_pt=True is presumed (PyTorch-only checkpoint) — confirm.
        model = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-3B', from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B')
        src_text = ['Sam']
        model_inputs = tokenizer(src_text, return_tensors='jax')
        generated_ids = model.generate(**model_inputs, **generate_kwargs)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'
        # NOTE(review): generate() typically returns an output object; this
        # likely needs `.sequences` before batch_decode — confirm upstream.
        generated_txt = tokenizer.batch_decode(generated_ids, **tokenizer_kwargs)
        assert generated_txt[0].strip() == tgt_text
| 53 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A_ ( __UpperCamelCase , unittest.TestCase ):
    """Fast, dummy-checkpoint tests for ConsistencyModelPipeline.

    NOTE(review): the obfuscated original reused `__snake_case` for every
    class attribute and `_snake_case` for every method, so later definitions
    shadowed earlier ones and references such as `self.dummy_cond_unet` were
    undefined.  The mixin attribute names and the helper names the test
    bodies reference are restored; the `test_*` names are reconstructed
    descriptions (the originals were lost to the obfuscation).
    """

    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            'num_inference_steps',
            'generator',
            'latents',
            'output_type',
            'return_dict',
            'callback',
            'callback_steps',
        ]
    )

    @property
    def dummy_uncond_unet(self):
        # Tiny unconditional UNet checkpoint for fast tests.
        unet = UNetaDModel.from_pretrained(
            'diffusers/consistency-models-test', subfolder='test_unet')
        return unet

    @property
    def dummy_cond_unet(self):
        # Tiny class-conditional UNet checkpoint.
        unet = UNetaDModel.from_pretrained(
            'diffusers/consistency-models-test', subfolder='test_unet_class_cond')
        return unet

    def get_dummy_components(self, class_cond=False):
        unet = self.dummy_cond_unet if class_cond else self.dummy_uncond_unet
        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
        return {'unet': unet, 'scheduler': scheduler}

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            # MPS generators cannot be device-bound; use the global one.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        return {
            'batch_size': 1,
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'generator': generator,
            'output_type': 'np',
        }

    def test_consistency_model_pipeline_multistep(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array(
            [0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs['class_labels'] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array(
            [0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        # Single-step sampling: drop the explicit timestep schedule.
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array(
            [0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        inputs['class_labels'] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array(
            [0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
    """Slow GPU tests for ConsistencyModelPipeline on the real
    `diffusers/consistency_models` ImageNet-64 checkpoint.

    NOTE(review): method and local names were collapsed by the obfuscation
    (every method was `_snake_case`); they are restored from the surviving
    references — e.g. ``super().tearDown()`` fixes the first method's name.
    `torch.floataa` is a garbled dtype (presumably torch.float16) and is
    kept as-is pending confirmation against the original source.
    """

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device='cpu',
                   dtype=torch.floataa, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)
        inputs = {
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'class_labels': 0,
            'generator': generator,
            'output_type': 'np',
        }
        if get_fixed_latents:
            # Pin the initial noise so fp16/fp32 runs are comparable.
            inputs['latents'] = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
        return inputs

    def get_fixed_latents(self, seed=0, device='cpu', dtype=torch.floataa, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        return randn_tensor(shape, generator=generator, device=device, dtype=dtype)

    def test_consistency_model_cd_multistep(self):
        unet = UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs()
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_a
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.floataa)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        # NOTE(review): the garbled original passed one undefined flag to all
        # three kwargs; flash-only selection is presumed — confirm.
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_a
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.floataa)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 669 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)

# NOTE(review): the obfuscated original rebound the same name to this dict,
# clobbering the logger above; the conventional archive-map name is restored.
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
    """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
    """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
    """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
    """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
    """roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class UpperCAmelCase__ ( __UpperCamelCase ):
    """RoBERTa model configuration.

    NOTE(review): reconstructed from an obfuscated original in which every
    `__init__` parameter was named `_a` (duplicate parameter names are a
    SyntaxError) and every value was assigned to a discarded local instead
    of `self`.  Parameter names are recovered from the right-hand sides of
    the original assignments; defaults are unchanged.
    """

    # Model identifier read by the auto classes (the original attribute name
    # was garbled; `model_type` is the conventional name — confirm).
    model_type = '''roberta'''

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class UpperCAmelCase__ ( __UpperCamelCase ):
    """ONNX export configuration for RoBERTa: maps each model input to its
    dynamic axes (an extra 'choice' axis for multiple-choice tasks)."""

    @property
    # NOTE(review): the garbled method name is kept; the original property
    # was presumably named `inputs` (the OnnxConfig convention) — confirm.
    def __lowercase(self):
        # NOTE(review): the original assigned the axis dict to a throwaway
        # local while referencing an undefined `dynamic_axis`; bind it.
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
| 229 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class A_ ( __UpperCamelCase ):
    """TrOCR decoder configuration.

    NOTE(review): reconstructed from an obfuscated original in which every
    `__init__` parameter was named `a` (a SyntaxError) and every value was
    assigned to a discarded local instead of `self`; names are recovered
    from the right-hand sides of the original assignments and the
    `super().__init__` keywords.  Defaults are unchanged.
    """

    model_type = """trocr"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """num_attention_heads""": """decoder_attention_heads""",
        """hidden_size""": """d_model""",
        """num_hidden_layers""": """decoder_layers""",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 669 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
# Output container for the prior transformer: constructed later in this file
# as `PriorTransformerOutput(predicted_image_embedding=...)`.
class _UpperCAmelCase ( __UpperCamelCase ):
    '''Output of the prior transformer forward pass.'''
    # NOTE(review): `a__ = 4_2` appears to be a garbled dataclass field
    # declaration (presumably `predicted_image_embedding: torch.FloatTensor`)
    # — confirm against the original source.
    a__ =4_2
class _UpperCAmelCase ( __UpperCamelCase ,__UpperCamelCase ):
    """Prior transformer: predicts a CLIP image embedding from a text
    embedding via a stack of causal transformer blocks.

    NOTE(review): reconstructed from an obfuscated original in which every
    local/attribute was bound to one reused name and every parameter was
    named `A` (duplicate parameters are a SyntaxError).  Attribute and
    method names are recovered from the surviving references (e.g.
    `self.attn_processors`, `self.time_proj`, `self.norm_in` are all used
    in the bodies below); parameter names are recovered from defaults and
    from the kwarg used at each site.
    """

    @register_to_config
    def __init__(
        self,
        num_attention_heads=32,
        attention_head_dim=64,
        num_layers=20,
        embedding_dim=768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout=0.0,
        time_embed_act_fn="silu",
        norm_in_type=None,
        embedding_proj_norm_type=None,
        encoder_hid_proj_type="linear",
        added_emb_type="prd",
        time_embed_dim=None,
        embedding_proj_dim=None,
        clip_embed_dim=None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        # Fall back to inner/embedding dims when the optional sizes are unset.
        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)
        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f'unsupported embedding_proj_norm_type: {embedding_proj_norm_type}')

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f'unsupported encoder_hid_proj_type: {encoder_hid_proj_type}')

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f'`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.')

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn='''gelu''',
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f'Unsupported norm_in_type: {norm_in_type}.')

        self.norm_out = nn.LayerNorm(inner_dim)
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        # Additive causal mask: -10000 above the diagonal blocks attention to
        # future positions.
        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -1_0_0_0_0.0)
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer('''causal_attention_mask''', causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self):
        """Return all attention processors, keyed by their module path."""
        processors = {}

        def fn_recursive_add_processors(name, module, processors):
            if hasattr(module, '''set_processor'''):
                processors[f'{name}.processor'] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f'{name}.{sub_name}', child, processors)
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)
        return processors

    def set_attn_processor(self, processor):
        """Install a processor (or a dict of processors keyed by path) on
        every attention layer."""
        count = len(self.attn_processors.keys())
        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f'A dict of processors was passed, but the number of processors {len(processor)} does not match the'
                f' number of attention layers: {count}. Please make sure to pass {count} processor classes.')

        def fn_recursive_attn_processor(name, module, processor):
            if hasattr(module, '''set_processor'''):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f'{name}.processor'))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f'{name}.{sub_name}', child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        """Reset every attention layer to the default AttnProcessor."""
        self.set_attn_processor(AttnProcessor())

    def forward(self, hidden_states, timestep, proj_embedding,
                encoder_hidden_states=None, attention_mask=None, return_dict=True):
        batch_size = hidden_states.shape[0]
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)
        timesteps_projected = self.time_proj(timesteps)
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)
        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError('''`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set''')

        hidden_states = self.proj_in(hidden_states)
        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        # Assemble the token sequence: [encoder states?] + proj + time + x (+ prd).
        additional_embeds = []
        additional_embeddings_len = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]
        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]
        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]
        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]
        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)
        hidden_states = torch.cat(additional_embeds, dim=1)

        # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )
        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            # Convert the 0/1 mask into an additive mask and merge with the
            # causal mask, repeated per attention head.
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -1_0_0_0_0.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)
        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)
        hidden_states = self.norm_out(hidden_states)
        if self.prd_embedding is not None:
            # Read the prediction off the trailing "prd" token.
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]
        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)
        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)

    def post_process_latents(self, prior_latents):
        """Un-normalize latents using the stored CLIP statistics."""
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
| 506 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A_ ( __UpperCamelCase ):
    """Processor combining a CLIP image processor and an XLM-RoBERTa
    tokenizer behind a single __call__.

    NOTE(review): reconstructed from an obfuscated original — all locals
    shared one name (`encoding`, `image_features`, `feature_extractor`
    were undefined), the `__init__` parameters were both named `a`, and
    the three delegation methods shadowed each other under `_snake_case`.
    Names are recovered from the surviving references.
    """

    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """CLIPImageProcessor"""
    tokenizer_class = ("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,  # NOTE(review): garbled category restored — confirm
            )
            feature_extractor = kwargs.pop('feature_extractor')
        # Accept the deprecated kwarg as a fallback.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            # Merge pixel values into the text encoding.
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        # Delegate to the tokenizer.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Delegate to the tokenizer.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        # Preserve order while de-duplicating.
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 669 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCamelCase = {
'configuration_informer': [
'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'InformerForPrediction',
'InformerModel',
'InformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 179 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
    """Fast test for the unconditional LDMPipeline using tiny dummy modules.

    NOTE(review): reconstructed — the obfuscated original named all three
    properties `_snake_case` (so only the last survived) and collapsed the
    locals (`model`, `config`, `ldm` were undefined).  Property names are
    restored from the references in the test body; the `test_*` name is a
    reconstructed description.
    """

    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=('DownBlock2D', 'AttnDownBlock2D'),
            up_block_types=('AttnUpBlock2D', 'UpBlock2D'),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model
        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)
        # Same seed twice so the dict and tuple outputs are comparable.
        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type='numpy').images
        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type='numpy', return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != 'mps' else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    """Slow integration test against the pretrained CompVis/ldm-celebahq-256 checkpoint.

    NOTE(review): this class was previously also named ``A_``, silently shadowing the
    fast test class above so those tests never ran; it has been renamed to fix the
    shadowing. The undefined name ``a`` is replaced with the intended values.
    """

    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        # mps accumulates more numerical error, so use a looser tolerance there.
        tolerance = 1e-2 if torch_device != "mps" else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 669 | 0 |
'''simple docstring'''
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class __UpperCAmelCase ( unittest.TestCase ):
    """Slow integration tests for BetterTransformer conversion round-trips.

    Fixes vs. previous revision: the undefined name ``_lowerCamelCase`` is replaced
    with the intended local values, and the two tests get distinct names (both were
    named ``UpperCAmelCase_``, so the second silently shadowed the first).
    """

    def test_transform_and_reverse(self):
        """Convert to BetterTransformer, reverse, save, reload, and check outputs match."""
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeqaSeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeqaSeqLM.from_pretrained(tmpdirname)

            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()))

            output_from_pretrained = model_reloaded.generate(**inp)
            # Reversing + reloading must be lossless: generations are identical.
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        """Saving while converted must raise; it works again after reversing."""
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeqaSeqLM.from_pretrained(model_id)

        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
| 274 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
usage_doc = "Usage of script: script_name <size_of_canvas:int>"

# Weighted choice pool (kept from the original revision; currently unused by the
# functions below — presumably intended for seeding. TODO confirm.)
choice = [0] * 1_0_0 + [1] * 1_0
random.shuffle(choice)


def create_canvas(size: int) -> list[list[bool]]:
    """Create a ``size`` x ``size`` canvas with every cell dead (False)."""
    canvas = [[False for _ in range(size)] for _ in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    """Randomly set each cell of *canvas* alive or dead, in place."""
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """Advance *canvas* by one generation of Conway's Game of Life and return it.

    Border cells use numpy slicing with negative/overflowing indices, so their
    neighbourhoods are clipped (cells outside the canvas count as dead).
    """
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    """Apply the Game of Life rules to one cell given its 3x3 neighbourhood."""
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False  # underpopulation
        elif alive == 2 or alive == 3:
            state = True   # survival
        elif alive > 3:
            state = False  # overpopulation
    else:
        if alive == 3:
            state = True   # reproduction
    return state


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
| 669 | 0 |
'''simple docstring'''
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the sorted halves input_list[low:mid] and input_list[mid:high+1] in place.

    Returns the (mutated) input_list for convenience.
    """
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        # Pop the smaller head element; stable because <= prefers the left run.
        result.append((left if left[0] <= right[0] else right).pop(0))
    # One of the runs may still hold a sorted tail; append both (one is empty).
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Sort *input_list* using iterative (bottom-up) merge sort and return a new list.

    >>> iter_merge_sort([5, 9, 8, 7, 1, 2, 7])
    [1, 2, 5, 7, 7, 8, 9]
    """
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
| 320 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    """Decoding strategies for the three MGP-STR prediction heads.

    Fixes vs. previous revision: the class was named ``A_`` with an undefined base
    ``__UpperCamelCase`` while the rest of the module references ``DecodeType``;
    the three members were all assigned to the (name-mangled) ``__snake_case``.
    """

    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


lowercase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor(ProcessorMixin):
    """Processor for MGP-STR: wraps a ViT image processor and a character tokenizer,
    plus auxiliary BPE (gpt2) and WordPiece (bert-base-uncased) tokenizers, and fuses
    the three prediction heads at decode time by picking the highest-confidence string.

    Fixes vs. previous revision: duplicate parameter names in ``__init__``/``__call__``
    (a SyntaxError), the undefined base class, and colliding local/attribute names.
    """

    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            # Back-compat shim for the deprecated `feature_extractor` kwarg.
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained('gpt2')
        self.wp_tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Run the image processor on `images` and/or the char tokenizer on `text`."""
        if images is None and text is None:
            raise ValueError('You need to specify either an `images` or `text` input to process.')

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs

    def batch_decode(self, sequences):
        """Decode (char, bpe, wp) logit triples; keep the best-scoring string per sample."""
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, 'char')
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, 'bpe')
        wp_strs, wp_scores = self._decode_helper(wp_preds, 'wp')

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out['generated_text'] = final_strs
        out['scores'] = final_scores
        out['char_preds'] = char_strs
        out['bpe_preds'] = bpe_strs
        out['wp_preds'] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        """Greedy-decode one head's logits; return (strings, confidence scores)."""
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = '[s]'
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = '#'
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = '[SEP]'
        else:
            raise ValueError(F'Format {format} is not supported.')

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        # Drop the first (BOS) position before decoding.
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            # Truncate at the EOS marker; confidence is the cumulative product of
            # per-step max probabilities up to (and including) EOS.
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        """Decode character-head ids; spaces are tokenization artifacts and removed."""
        decode_strs = [seq.replace(' ', '') for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        """Decode BPE-head ids with the gpt2 tokenizer."""
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        """Decode WordPiece-head ids; spaces are tokenization artifacts and removed."""
        decode_strs = [seq.replace(' ', '') for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
| 669 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'


def get_user_input():
    """Prompt the user for their compute environment and collect the matching config."""
    compute_environment = _ask_options(
        """In which compute environment are you running?""",
        ["""This machine""", """AWS (Amazon SageMaker)"""],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    """Build the `accelerate config` argument parser (optionally as a subcommand)."""
    if subparsers is not None:
        parser = subparsers.add_parser("""config""", description=description)
    else:
        parser = argparse.ArgumentParser("""Accelerate config command""", description=description)

    parser.add_argument(
        """--config_file""",
        default=None,
        help=(
            """The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
            """location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have """
            """such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed """
            """with \'huggingface\'."""
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    """Collect the config interactively and write it to the chosen (or default) path."""
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    # Serialize by extension: .json -> JSON, anything else -> YAML.
    if config_file.endswith(""".json"""):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(F'''accelerate configuration saved at {config_file}''')


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
| 279 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()

logger = logging.get_logger(__name__)

# Map tokenizer base name -> fast tokenizer class, for every convertible tokenizer.
TOKENIZER_CLASSES = {name: getattr(transformers, name + 'Fast') for name in SLOW_TO_FAST_CONVERTERS}


def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    """Convert slow tokenizer checkpoints to their fast (tokenizer.json) equivalents.

    Args:
        tokenizer_name: base tokenizer class name to convert, or None for all of them.
        checkpoint_name: specific checkpoint to convert, or None for all known checkpoints.
        dump_path: directory where converted tokenizer files are written.
        force_download: re-download checkpoints even if cached.
    """
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.')

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + 'Fast')}

    logger.info(f'Loading tokenizer classes: {tokenizer_names}')

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f'For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}')

        for checkpoint in checkpoint_names:
            logger.info(f'Loading {tokenizer_class.__class__.__name__} {checkpoint}')

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}')

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split('/')
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}')

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(f'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}')

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f'=> File names {file_names}')

            # Keep only the consolidated tokenizer.json artifacts.
            for file_name in file_names:
                if not file_name.endswith('tokenizer.json'):
                    os.remove(file_name)
                    logger.info(f'=> removing {file_name}')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--dump_path', default=None, type=str, required=True, help='Path to output generated fast tokenizer files.'
    )
    parser.add_argument(
        '--tokenizer_name',
        default=None,
        type=str,
        help=(
            F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
            'download and convert all the checkpoints from AWS.'
        ),
    )
    parser.add_argument(
        '--checkpoint_name',
        default=None,
        type=str,
        help='Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.',
    )
    parser.add_argument(
        '--force_download',
        action='store_true',
        help='Re-download checkpoints.',
    )
    args = parser.parse_args()

    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 669 | 0 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
    """,
)
class FillMaskPipeline(Pipeline):
    """Masked-language-modeling pipeline: predicts replacements for mask tokens.

    Fixes vs. previous revision: duplicate parameter names (SyntaxErrors), the class
    extending its own undefined name, and all methods sharing one name
    (``UpperCamelCase``) so only the last definition survived.
    """

    def get_masked_index(self, input_ids):
        """Return the positions of the mask token in `input_ids` for the active framework."""
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError('''Unsupported framework''')
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids):
        """Raise a PipelineException if `input_ids` contains no mask token."""
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                '''fill-mask''',
                self.model.base_model_prefix,
                f'''No mask_token ({self.tokenizer.mask_token}) found on the input''',
            )

    def ensure_exactly_one_mask_token(self, model_inputs):
        """Validate every sample in `model_inputs` (list of dicts or a batched dict)."""
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters):
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        # Keep the input ids around: postprocess needs them to rebuild sequences.
        model_outputs['input_ids'] = model_inputs['input_ids']
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        """Turn logits into per-mask lists of {score, token, token_str, sequence} dicts."""
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs['input_ids'][0]
        outputs = model_outputs['logits']

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {'score': v, 'token': p, 'token_str': self.tokenizer.decode([p]), 'sequence': sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        """Resolve target strings to vocabulary ids, tokenizing out-of-vocab targets."""
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )['input_ids']
                if len(input_ids) == 0:
                    logger.warning(
                        f'''The specified target token `{target}` does not exist in the model vocabulary. '''
                        '''We cannot replace it with anything meaningful, ignoring it''')
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f'''The specified target token `{target}` does not exist in the model vocabulary. '''
                    f'''Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.''')
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError('''At least one target must be provided when passed.''')
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params['target_ids'] = target_ids

        if top_k is not None:
            postprocess_params['top_k'] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                '''fill-mask''', self.model.base_model_prefix, '''The tokenizer does not define a `mask_token`.''')
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowercase_ = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class A_ ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = PegasusTokenizer
__snake_case = PegasusTokenizerFast
__snake_case = True
__snake_case = True
def _snake_case ( self: List[str] ):
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCamelCase : List[str] = PegasusTokenizer(a )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _snake_case ( self: List[Any] ):
return PegasusTokenizer.from_pretrained('google/pegasus-large' )
def _snake_case ( self: Tuple , **a: List[Any] ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **a )
def _snake_case ( self: List[Any] , a: int ):
return ("This is a test", "This is a test")
def _snake_case ( self: Any ):
__lowerCamelCase : Dict = '</s>'
__lowerCamelCase : List[str] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a )
def _snake_case ( self: Optional[Any] ):
__lowerCamelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '</s>' )
self.assertEqual(vocab_keys[-1] , 'v' )
self.assertEqual(len(a ) , 1103 )
def _snake_case ( self: Tuple ):
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def _snake_case ( self: Dict ):
__lowerCamelCase : Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
__lowerCamelCase : List[str] = self.tokenizer_class.from_pretrained(self.tmpdirname )
__lowerCamelCase : Tuple = (
'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
' </s> <pad> <pad> <pad>'
)
__lowerCamelCase : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=a , add_special_tokens=a ).input_ids[0]
__lowerCamelCase : str = py_tokenizer([raw_input_str] , return_tensors=a , add_special_tokens=a ).input_ids[0]
self.assertListEqual(a , a )
def _snake_case ( self: int ):
__lowerCamelCase : Union[str, Any] = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
__lowerCamelCase : Tuple = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
__lowerCamelCase : Optional[Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
__lowerCamelCase : Optional[Any] = tokenizer([raw_input_str] , return_tensors=a ).input_ids[0]
self.assertListEqual(a , a )
def _snake_case ( self: Dict ):
__lowerCamelCase : Any = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
__lowerCamelCase : int = 'To ensure a smooth flow of bank resolutions.'
__lowerCamelCase : Union[str, Any] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
__lowerCamelCase : List[str] = tokenizer([raw_input_str] , return_tensors=a ).input_ids[0]
self.assertListEqual(a , a )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def _snake_case ( self: str ):
__lowerCamelCase : List[str] = ['This is going to be way too long.' * 150, 'short example']
__lowerCamelCase : Tuple = ['not super long but more than 5 tokens', 'tiny']
__lowerCamelCase : Union[str, Any] = self._large_tokenizer(a , padding=a , truncation=a , return_tensors='pt' )
__lowerCamelCase : List[str] = self._large_tokenizer(
text_target=a , max_length=5 , padding=a , truncation=a , return_tensors='pt' )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(a ) == 2 # input_ids, attention_mask.
@slow
def _snake_case ( self: List[str] ):
# fmt: off
__lowerCamelCase : Tuple = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class A_ ( __UpperCamelCase , unittest.TestCase ):
    """Tokenizer tests for the BigBird-Pegasus flavour (offset=0, '[MASK]' mask token)."""

    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        # NOTE(review): the vocab-file argument was lost in this copy of the test;
        # SAMPLE_VOCAB is the conventional fixture name here -- confirm upstream.
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token='[MASK]')
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        # Published checkpoint used by the slow/integration assertions below.
        return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv')

    def get_tokenizer(self, **kwargs):
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        """Python and Rust tokenizers must agree on a string full of special tokens."""
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
            ' <pad> <pad> <pad>'
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        """Inputs truncate/pad to model_max_length (4096); targets pad to max_length=5."""
        src_texts = ['This is going to be way too long.' * 1000, 'short example']
        tgt_texts = ['not super long but more than 5 tokens', 'tiny']
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors='pt')
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors='pt'
        )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        """Token ids must match the reference implementation for a fixed string."""
        test_str = (
            'This is an example string that is used to test the original TF implementation against the HF'
            ' implementation'
        )
        token_ids = self._large_tokenizer(test_str).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
| 669 | 0 |
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 608 |
def UpperCamelCase__ ( density , bulk_modulus ):
    """Return the speed of sound in a fluid: sqrt(bulk_modulus / density).

    Raises:
        ValueError: if density or bulk_modulus is not strictly positive.
    """
    if density <= 0:
        raise ValueError('Impossible fluid density')
    if bulk_modulus <= 0:
        raise ValueError('Impossible bulk modulus')
    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 669 | 0 |
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class snake_case (__UpperCamelCase ):
    """Speech2Text processor: bundles a feature extractor (audio) and a tokenizer (text)."""

    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        # By default process audio; `as_target_processor` temporarily swaps in the tokenizer.
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """Dispatch `audio` to the feature extractor and/or `text` to the tokenizer.

        Returns the feature-extractor inputs, the tokenizer encodings, or the
        inputs with a ``labels`` entry holding the tokenized text.
        """
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            # First positional argument is treated as audio; the rest pass through.
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings['input_ids']
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Deprecated: temporarily use the tokenizer as the default processor for labels."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call." )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 267 |
def count_divisors(n):
    """Return the number of positive divisors of ``n`` via trial-division factorisation."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        # Whatever remains is a prime factor with multiplicity 1.
        n_divisors *= 2
    return n_divisors


def solution():
    """Return the first triangle number with more than 500 divisors (Project Euler 12)."""
    i = 1
    while True:
        # T_i = i*(i+1)/2 and gcd(i, i+1) == 1, so the divisor count factorises;
        # this keeps every count_divisors() call on a small operand.
        if i % 2 == 0:
            divisors = count_divisors(i // 2) * count_divisors(i + 1)
        else:
            divisors = count_divisors(i) * count_divisors((i + 1) // 2)
        if divisors > 500:
            return i * (i + 1) // 2
        i += 1


if __name__ == "__main__":
    print(solution())
| 669 | 0 |
def hamming(n_element: int) -> list:
    '''Return the first ``n_element`` Hamming numbers (of the form 2^i * 3^j * 5^k).

    Raises:
        ValueError: if ``n_element`` is smaller than 1.
    '''
    n_element = int(n_element)
    if n_element < 1:
        # Historical error text kept for compatibility.
        raise ValueError("a should be a positive number")

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        # Advance each pointer past candidates no longer larger than the current
        # maximum, then append the smallest next multiple of 2, 3 or 5.
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5))
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input('Enter the last number (nth term) of the Hamming Number Series: ')
    print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
    hamming_numbers = hamming(int(n))
    print('-----------------------------------------------------')
    print(f"The list with nth numbers is: {hamming_numbers}")
    print('-----------------------------------------------------')
import numpy as np
class Cell:
    """A* grid cell: position, parent back-link and path costs g/h/f."""

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        # g: cost from start, h: heuristic to goal, f: g + h
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        # Cells are compared by grid position only.
        return self.position == cell.position

    def showcell(self):
        print(self.position)
class Gridworld:
    """A rectangular grid; ``w`` is a numpy array of cell weights."""

    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neigbours(self, cell):
        """Return the (up to 8) in-bounds neighbour cells of ``cell``."""
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours
def astar(world, start, goal):
    """Run A* from ``start`` to ``goal`` on ``world``; return the path of positions."""
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        # Expand the open cell with the smallest f = g + h.
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            # NOTE(review): as written upstream, this loop is a no-op (the
            # `continue` only skips within the inner loop), so closed cells
            # are still re-scored; behaviour kept as-is.
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            # Squared Euclidean distance heuristic.
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            # NOTE(review): same no-op pattern as above for the open list.
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)
    # Walk parent links back from the goal to reconstruct the path.
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
if __name__ == "__main__":
lowercase_ = Gridworld()
# Start position and goal
lowercase_ = Cell()
lowercase_ = (0, 0)
lowercase_ = Cell()
lowercase_ = (4, 4)
print(F"""path from {start.position} to {goal.position}""")
lowercase_ = astar(world, start, goal)
# Just for visual reasons.
for i in s:
lowercase_ = 1
print(world.w)
| 669 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
_snake_case : Union[str, Any] = logging.get_logger(__name__)
_snake_case : Union[str, Any] = {
'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class _UpperCAmelCase ( __UpperCamelCase ):
    """Configuration class for the Perceiver model (``model_type="perceiver"``)."""

    a_ = """perceiver"""

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        # NOTE(review): mutable list defaults kept from the original signature.
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class _UpperCAmelCase ( __UpperCamelCase ):
    """ONNX export configuration for Perceiver.

    NOTE(review): this class shadows the config class of the same obfuscated
    name defined above; it almost certainly had a distinct name originally.
    """

    @property
    def inputs(self):
        """Input axes: dynamic batch/sequence (plus choice for multiple-choice)."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('inputs', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self):
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ):
        """Build dummy text or image inputs (keyed 'inputs') for ONNX tracing."""
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [' '.join(['a']) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop('input_ids')
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop('pixel_values')
            return inputs
        else:
            raise ValueError(
                'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.'
            )
| 53 |
import math
from datetime import datetime, timedelta
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : Tuple = year % 19
__lowerCamelCase : int = year % 4
__lowerCamelCase : Any = year % 7
__lowerCamelCase : Dict = math.floor(year / 100 )
__lowerCamelCase : str = math.floor((13 + 8 * leap_day_inhibits) / 25 )
__lowerCamelCase : Optional[int] = leap_day_inhibits / 4
__lowerCamelCase : str = (
15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 30
__lowerCamelCase : Optional[Any] = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
__lowerCamelCase : Optional[int] = (19 * metonic_cycle + secular_moon_shift) % 30
# PHM -> Paschal Full Moon
__lowerCamelCase : Tuple = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 29 and days_from_phm_to_sunday == 6:
return datetime(SCREAMING_SNAKE_CASE__ , 4 , 19 )
elif days_to_add == 28 and days_from_phm_to_sunday == 6:
return datetime(SCREAMING_SNAKE_CASE__ , 4 , 18 )
else:
return datetime(SCREAMING_SNAKE_CASE__ , 3 , 22 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
lowercase_ = 'will be' if year > datetime.now().year else 'was'
print(F"""Easter in {year} {tense} {gauss_easter(year)}""")
| 669 | 0 |
'''simple docstring'''
from graphs.minimum_spanning_tree_kruskal import kruskal
def UpperCAmelCase_ ():
    """Regression test: kruskal() on a 9-node graph must return the known MST."""
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 1_4],
        [3, 4, 9],
        [5, 4, 1_0],
        [1, 7, 1_1],
    ]
    result = kruskal(num_nodes, edges)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    # Edge ordering from kruskal() is not guaranteed, so compare sorted lists.
    assert sorted(expected) == sorted(result)
| 229 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class A_ ( __UpperCamelCase , __UpperCamelCase ):
    """Variance-preserving (VP) SDE scheduler after Song et al., score_sde_pytorch."""

    __snake_case = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        # NOTE(review): the first two attribute names were lost in this copy;
        # `sigmas`/`discrete_sigmas` follow the diffusers implementation -- confirm.
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        """Create the descending continuous timestep grid in (sampling_eps, 1]."""
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        """One reverse-SDE predictor step; returns (noisy sample, noise-free mean)."""
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler'
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
| 669 | 0 |
"""simple docstring"""
import sys
def lowerCamelCase_ (UpperCamelCase__ : List[str] ):
_UpperCAmelCase : Dict = len(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Optional[int] = [[0 for x in range(SCREAMING_SNAKE_CASE__ )] for x in range(SCREAMING_SNAKE_CASE__ )]
_UpperCAmelCase : List[str] = [[0 for x in range(SCREAMING_SNAKE_CASE__ )] for x in range(SCREAMING_SNAKE_CASE__ )]
for chain_length in range(2 , SCREAMING_SNAKE_CASE__ ):
for a in range(1 , n - chain_length + 1 ):
_UpperCAmelCase : List[Any] = a + chain_length - 1
_UpperCAmelCase : Optional[int] = sys.maxsize
for c in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
_UpperCAmelCase : Union[str, Any] = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
_UpperCAmelCase : Union[str, Any] = cost
_UpperCAmelCase : List[str] = c
return matrix, sol
def lowerCamelCase_ (UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] ):
if i == j:
print('''A''' + str(SCREAMING_SNAKE_CASE__ ) , end=''' ''' )
else:
print('''(''' , end=''' ''' )
print_optiomal_solution(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , optimal_solution[i][j] )
print_optiomal_solution(SCREAMING_SNAKE_CASE__ , optimal_solution[i][j] + 1 , SCREAMING_SNAKE_CASE__ )
print(''')''' , end=''' ''' )
def lowerCamelCase_ ():
_UpperCAmelCase : Dict = [30, 35, 15, 5, 10, 20, 25]
_UpperCAmelCase : int = len(SCREAMING_SNAKE_CASE__ )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
_UpperCAmelCase : List[str] = matrix_chain_order(SCREAMING_SNAKE_CASE__ )
print('''No. of Operation required: ''' + str(matrix[1][n - 1] ) )
print_optiomal_solution(SCREAMING_SNAKE_CASE__ , 1 , n - 1 )
if __name__ == "__main__":
main()
| 506 |
def hamming(n_element):
    """Return the first ``n_element`` Hamming numbers (2^i * 3^j * 5^k).

    Raises:
        ValueError: if ``n_element`` is smaller than 1.
    """
    n_element = int(n_element)
    if n_element < 1:
        # Historical error text kept for compatibility.
        raise ValueError('a should be a positive number')

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        # Move each pointer forward past stale candidates, then append the
        # smallest next multiple of 2, 3 or 5.
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5))
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input('Enter the last number (nth term) of the Hamming Number Series: ')
    print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
    hamming_numbers = hamming(int(n))
    print('-----------------------------------------------------')
    print(f"""The list with nth numbers is: {hamming_numbers}""")
    print('-----------------------------------------------------')
| 669 | 0 |
"""simple docstring"""
from torch import nn
class SCREAMING_SNAKE_CASE_ ( nn.Module ):
    """Single-layer linear classification head mapping embeddings to class logits."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state)
        return logits
| 179 |
import unittest
from knapsack import greedy_knapsack as kp
class A_ ( unittest.TestCase ):
    """Unit tests for knapsack.greedy_knapsack.calc_profit results and validation."""

    def test_sorted(self):
        # calc_profit should return the maximal achievable profit.
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        # NOTE(review): upstream passes only (exception, regex) here, so nothing
        # is actually executed/asserted; behaviour kept as-is.
        self.assertRaisesRegex(ValueError, 'max_weight must greater than zero.')

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, 'Weight can not be negative.')

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, 'Profit can not be negative.')

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, 'max_weight must greater than zero.')

    def test_unequal_list_length(self):
        self.assertRaisesRegex(
            IndexError, 'The length of profit and weight must be same.')


if __name__ == "__main__":
    unittest.main()
| 669 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Tuple =logging.get_logger(__name__)
A_ : Tuple ={
'''MIT/ast-finetuned-audioset-10-10-0.4593''': (
'''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
),
}
class __UpperCAmelCase ( __UpperCamelCase ):
    """Configuration for the Audio Spectrogram Transformer (AST) model."""

    __A : List[str] = 'audio-spectrogram-transformer'

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        # Patch strides over the spectrogram's frequency and time axes.
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
| 274 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A_ :
'''simple docstring'''
def __init__( self: Optional[int] , a: Union[str, Any] , a: Union[str, Any]=2 , a: str=3 , a: Any=4 , a: Union[str, Any]=2 , a: Tuple=7 , a: int=True , a: Tuple=True , a: List[str]=True , a: Union[str, Any]=True , a: str=99 , a: Tuple=36 , a: int=2 , a: Dict=4 , a: Union[str, Any]=37 , a: List[str]="gelu" , a: List[Any]=0.1 , a: Optional[int]=0.1 , a: Dict=512 , a: Union[str, Any]=16 , a: str=2 , a: int=0.0_2 , a: Optional[Any]=6 , a: Optional[int]=6 , a: Dict=3 , a: Optional[Any]=4 , a: Optional[Any]=None , a: Dict=1000 , ):
__lowerCamelCase : List[str] = parent
__lowerCamelCase : Optional[Any] = batch_size
__lowerCamelCase : Optional[int] = num_channels
__lowerCamelCase : str = image_size
__lowerCamelCase : int = patch_size
__lowerCamelCase : List[str] = is_training
__lowerCamelCase : Dict = use_input_mask
__lowerCamelCase : Any = use_token_type_ids
__lowerCamelCase : List[str] = use_labels
__lowerCamelCase : str = vocab_size
__lowerCamelCase : List[Any] = hidden_size
__lowerCamelCase : List[Any] = num_hidden_layers
__lowerCamelCase : Any = num_attention_heads
__lowerCamelCase : List[Any] = intermediate_size
__lowerCamelCase : List[Any] = hidden_act
__lowerCamelCase : Any = hidden_dropout_prob
__lowerCamelCase : Optional[int] = attention_probs_dropout_prob
__lowerCamelCase : Dict = max_position_embeddings
__lowerCamelCase : Tuple = type_vocab_size
__lowerCamelCase : int = type_sequence_label_size
__lowerCamelCase : List[str] = initializer_range
__lowerCamelCase : List[str] = coordinate_size
__lowerCamelCase : int = shape_size
__lowerCamelCase : Union[str, Any] = num_labels
__lowerCamelCase : int = num_choices
__lowerCamelCase : int = scope
__lowerCamelCase : Any = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__lowerCamelCase : Any = text_seq_length
__lowerCamelCase : Optional[Any] = (image_size // patch_size) ** 2 + 1
__lowerCamelCase : Any = self.text_seq_length + self.image_seq_length
def _snake_case ( self: List[str] ):
__lowerCamelCase : Any = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
__lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
__lowerCamelCase : int = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__lowerCamelCase : List[str] = bbox[i, j, 3]
__lowerCamelCase : str = bbox[i, j, 1]
__lowerCamelCase : Dict = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
__lowerCamelCase : Tuple = bbox[i, j, 2]
__lowerCamelCase : Any = bbox[i, j, 0]
__lowerCamelCase : List[str] = tmp_coordinate
__lowerCamelCase : str = tf.constant(a )
__lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase : Any = None
if self.use_input_mask:
__lowerCamelCase : int = random_attention_mask([self.batch_size, self.text_seq_length] )
__lowerCamelCase : Tuple = None
if self.use_token_type_ids:
__lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
__lowerCamelCase : Dict = None
__lowerCamelCase : Union[str, Any] = None
if self.use_labels:
__lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
__lowerCamelCase : Dict = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _snake_case ( self: Tuple , a: List[Any] , a: Any , a: List[str] , a: Dict , a: Optional[Any] , a: Dict ):
__lowerCamelCase : Optional[Any] = TFLayoutLMvaModel(config=a )
# text + image
__lowerCamelCase : Optional[Any] = model(a , pixel_values=a , training=a )
__lowerCamelCase : int = model(
a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , training=a , )
__lowerCamelCase : List[Any] = model(a , bbox=a , pixel_values=a , training=a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
__lowerCamelCase : List[Any] = model(a , training=a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__lowerCamelCase : Optional[Any] = model({'pixel_values': pixel_values} , training=a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _snake_case ( self: Dict , a: Dict , a: Optional[Any] , a: int , a: Optional[int] , a: List[str] , a: List[str] , a: List[str] ):
__lowerCamelCase : List[str] = self.num_labels
__lowerCamelCase : str = TFLayoutLMvaForSequenceClassification(config=a )
__lowerCamelCase : int = model(
a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , labels=a , training=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self: Optional[int] , a: Union[str, Any] , a: Union[str, Any] , a: Dict , a: Optional[Any] , a: Tuple , a: Optional[Any] , a: List[Any] ):
__lowerCamelCase : Union[str, Any] = self.num_labels
__lowerCamelCase : Any = TFLayoutLMvaForTokenClassification(config=a )
__lowerCamelCase : Optional[Any] = model(
a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , labels=a , training=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _snake_case ( self: Dict , a: Optional[Any] , a: str , a: Dict , a: Union[str, Any] , a: List[Any] , a: Optional[int] , a: List[str] ):
__lowerCamelCase : List[Any] = 2
__lowerCamelCase : Any = TFLayoutLMvaForQuestionAnswering(config=a )
__lowerCamelCase : Any = model(
a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , start_positions=a , end_positions=a , training=a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self: List[Any] ):
__lowerCamelCase : str = self.prepare_config_and_inputs()
((__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase)) : List[Any] = config_and_inputs
__lowerCamelCase : Tuple = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class A_ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
    '''Common test suite for the TF LayoutLMv3 model family.

    NOTE(review): this block is machine-mangled. All five class attributes are
    bound to the single name ``__snake_case`` (only the last binding survives),
    several method signatures repeat the parameter name ``a`` (a SyntaxError in
    Python), ``tf.intaa`` looks like a corrupted ``tf.int32``, and bare ``a``
    references stand in for real argument names. Code left byte-identical;
    only comments/docstrings were changed.
    '''
    # Presumably ``all_model_classes`` originally — all TF LayoutLMv3 heads,
    # empty when TensorFlow is unavailable.
    __snake_case = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    # Presumably the pipeline-model mapping for pipeline tests.
    __snake_case = (
        {"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    # Three boolean feature flags, all rebinding the same mangled name.
    __snake_case = False
    __snake_case = False
    __snake_case = False
    # Pipeline-skip hook: unconditionally skips every pipeline test.
    # NOTE(review): duplicate parameter name ``a`` — SyntaxError as written.
    def _snake_case ( self: int , a: List[str] , a: Any , a: Optional[Any] , a: Tuple , a: Tuple ):
        return True
    # Deep-copies the inputs dict; for multiple-choice models tiles each tensor
    # across the choice dimension, then optionally adds dummy label tensors per
    # task family. NOTE(review): duplicate ``a`` parameters and undefined names
    # (``model_class``, ``inputs_dict``, ``return_labels``) — mangled source.
    def _snake_case ( self: str , a: Any , a: Any , a: Optional[int]=False ):
        __lowerCamelCase : List[str] = copy.deepcopy(a )
        if model_class in get_values(a ):
            __lowerCamelCase : Tuple = {
                k: tf.tile(tf.expand_dims(a , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
                if isinstance(a , tf.Tensor ) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(a ):
                __lowerCamelCase : Any = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(a ):
                # Question answering: start and end positions.
                __lowerCamelCase : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
                __lowerCamelCase : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(a ):
                __lowerCamelCase : str = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(a ):
                # Token classification: one label per text token.
                __lowerCamelCase : Dict = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
        return inputs_dict
    # Standard setUp: instantiate the model tester and a ConfigTester.
    # NOTE(review): ``config_class=a`` is a dangling reference.
    def _snake_case ( self: Tuple ):
        __lowerCamelCase : int = TFLayoutLMvaModelTester(self )
        __lowerCamelCase : str = ConfigTester(self , config_class=a , hidden_size=37 )
    def _snake_case ( self: Union[str, Any] ):
        self.config_tester.run_common_tests()
    # Loss-computation test: for every model with ``hf_compute_loss``, verify
    # the loss is computed identically for kwargs, masked labels, dict and
    # tuple call conventions. NOTE(review): heavy use of undefined mangled
    # names (``inputs_dict``, ``added_label``, ``loss`` ...) — left as-is.
    def _snake_case ( self: Union[str, Any] ):
        __lowerCamelCase , __lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowerCamelCase : int = model_class(a )
            if getattr(a , 'hf_compute_loss' , a ):
                # The number of elements in the loss should be the same as the number of elements in the label
                __lowerCamelCase : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                __lowerCamelCase : int = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=a )[0]
                ]
                __lowerCamelCase : Dict = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                __lowerCamelCase : Optional[int] = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                __lowerCamelCase : Dict = prepared_for_class.pop('input_ids' )
                __lowerCamelCase : str = model(a , **a )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss when we mask some positions
                __lowerCamelCase : List[Any] = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                __lowerCamelCase : List[str] = prepared_for_class.pop('input_ids' )
                if "labels" in prepared_for_class:
                    __lowerCamelCase : int = prepared_for_class['labels'].numpy()
                    if len(labels.shape ) > 1 and labels.shape[1] != 1:
                        # -100 is the conventional ignore index for HF losses.
                        __lowerCamelCase : Tuple = -100
                        __lowerCamelCase : Tuple = tf.convert_to_tensor(a )
                        __lowerCamelCase : Tuple = model(a , **a )[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                        self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
                # Test that model correctly compute the loss with a dict
                __lowerCamelCase : int = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                __lowerCamelCase : str = model(a )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss with a tuple
                __lowerCamelCase : str = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                # Get keys that were added with the _prepare_for_class function
                __lowerCamelCase : Optional[Any] = prepared_for_class.keys() - inputs_dict.keys()
                __lowerCamelCase : List[Any] = inspect.signature(model.call ).parameters
                __lowerCamelCase : List[str] = list(signature.keys() )
                # Create a dictionary holding the location of the tensors in the tuple
                __lowerCamelCase : Optional[int] = {0: 'input_ids'}
                for label_key in label_keys:
                    __lowerCamelCase : Dict = signature_names.index(a )
                    __lowerCamelCase : str = label_key
                __lowerCamelCase : List[str] = sorted(tuple_index_mapping.items() )
                # Initialize a list with their default values, update the values and convert to a tuple
                __lowerCamelCase : Optional[int] = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default )
                for index, value in sorted_tuple_index_mapping:
                    __lowerCamelCase : Optional[int] = prepared_for_class[value]
                __lowerCamelCase : Any = tuple(a )
                # Send to model
                __lowerCamelCase : int = model(tuple_input[:-1] )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
    # NOTE(review): the 8-way parenthesised tuple target with a ``: Tuple``
    # annotation is invalid Python (annotated tuple targets are a
    # SyntaxError) and every ``a`` argument below is undefined — mangled.
    def _snake_case ( self: List[str] ):
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(a , a , a , a , a , a )
    # Same check across the three position-embedding types.
    def _snake_case ( self: int ):
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : str = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            __lowerCamelCase : Union[str, Any] = type
            self.model_tester.create_and_check_model(a , a , a , a , a , a )
    def _snake_case ( self: Dict ):
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            a , a , a , a , a , a , a )
    def _snake_case ( self: str ):
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            a , a , a , a , a , a , a )
    def _snake_case ( self: str ):
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            a , a , a , a , a , a , a )
    # Smoke-test loading the first published checkpoint.
    # NOTE(review): ``from_pretrained(a )`` should presumably be ``model_name``.
    @slow
    def _snake_case ( self: int ):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowerCamelCase : Dict = TFLayoutLMvaModel.from_pretrained(a )
            self.assertIsNotNone(a )
def UpperCamelCase__ ( ):
    """Load the COCO cats fixture image used by the slow integration test.

    Bug fix: the mangled source assigned the opened image to a throwaway name
    and then returned the undefined name ``image`` (NameError at runtime).
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image


# The integration test below refers to this helper as ``prepare_img``; expose
# that name as a backward-compatible alias.
prepare_img = UpperCamelCase__
@require_tf
class A_ ( unittest.TestCase ):
    """Slow integration test running the real ``microsoft/layoutlmv3-base`` checkpoint.

    Restored from a mangled copy in which both methods were named
    ``_snake_case`` (the second shadowed the first) while the test body read
    ``self.default_image_processor`` — a name that was never defined.
    """

    @cached_property
    def default_image_processor(self):
        # apply_ocr=False: the test supplies its own input_ids/bbox, so the
        # processor must not run OCR on the image. (The mangled source passed
        # the undefined name ``a`` here.)
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base')

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='tf').pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass (inference mode)
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the shape and a slice of the last hidden state
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]])
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 669 | 0 |
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class __snake_case (SchedulerCommonTest):
    """Unit tests for ``CMStochasticIterativeScheduler`` (consistency models).

    Restored from a mangled copy: both class attributes were bound to one
    name, every test method was called ``UpperCAmelCase_`` (so only the last
    survived and none were discovered by unittest), and the base class was a
    dangling name although ``SchedulerCommonTest`` is imported above.
    """

    # ``SchedulerCommonTest`` helpers and the methods below read these names
    # (e.g. ``self.scheduler_classes[0]``).
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Return the default scheduler config with ``kwargs`` overrides applied."""
        config = {
            'num_train_timesteps': 201,
            'sigma_min': 0.002,
            'sigma_max': 80.0,
        }
        config.update(**kwargs)
        return config

    def test_step_shape(self):
        """``step`` must preserve the sample shape at different timesteps."""
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)

        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        """Deterministic single-step denoising loop; checks scalar statistics."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3

    def test_full_loop_no_noise_multistep(self):
        """Deterministic loop over explicit custom timesteps [106, 0]."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3

    def test_custom_timesteps_increasing_order(self):
        """Non-descending custom timesteps must be rejected."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]

        with self.assertRaises(ValueError, msg='`timesteps` must be in descending order.'):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg='Can only pass one of `num_inference_steps` or `timesteps`.'):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}',
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 320 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenizer test suite for CLIP's slow and fast tokenizers.

    Restored from a mangled copy: every class attribute was bound to the one
    name ``__snake_case`` and every method was named ``_snake_case``, so the
    mixin hooks (``setUp``, ``get_tokenizer`` ...) never ran and later
    bindings shadowed earlier ones. ``TokenizerTesterMixin`` is imported
    above and is the intended base class.
    """

    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Minimal BPE vocabulary/merges sufficient to tokenize "lower newer".
        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = 'xa\u0303y' + ' ' + 'x\xe3y'
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    '\u0009',  # (horizontal tab, '\t')
                    '\u000B',  # (vertical tab)
                    '\u000C',  # (form feed)
                    '\u0020',  # (space, ' ')
                    '\u200E',  # (left-to-right mark):w
                    '\u200F',  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    '\u000A',  # (line feed, '\n')
                    '\r\n',  # (carriage return and line feed, '\r\n')
                    '\u000D',  # (carriage return, '\r')
                    '\r',  # (carriage return, '\r')
                    '\u000D',  # (carriage return, '\r')
                    '\u2028',  # (line separator)
                    '\u2029',  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
                text_of_1_token = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`
                text = F'{text_of_1_token} {text_of_1_token}'

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = F' {text}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error in raised when the user try to load a previous version of the tokenizer.
        with self.assertRaises(BaseException) as context:
            self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer')
        self.assertTrue(
            context.exception.args[0].startswith(
                'The `backend_tokenizer` provided does not match the expected format.'))

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
| 669 | 0 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def A__ (tf_checkpoint_path , bert_config_file , pytorch_dump_path ):
    """Convert a TensorFlow BERT checkpoint to a PyTorch ``BertForPreTraining`` file.

    The mangled source named all three parameters ``snake_case`` (a
    SyntaxError) and referenced the undefined ``SCREAMING_SNAKE_CASE__``;
    parameter order matches the positional call in the CLI block below.

    :param tf_checkpoint_path: path to the TensorFlow checkpoint to read.
    :param bert_config_file: JSON config describing the model architecture.
    :param pytorch_dump_path: destination for the PyTorch ``state_dict``.
    """
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(F'''Building PyTorch model from configuration: {config}''' )
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict(), pytorch_dump_path)


# Conventional name used by the CLI entry point below.
convert_tf_checkpoint_to_pytorch = A__
if __name__ == "__main__":
    # The mangled source bound both the parser and the parsed namespace to the
    # throwaway name ``a__`` and then read ``parser``/``args``; restore the
    # names the following lines actually use.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--bert_config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained BERT model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
    # Call the converter by its defined name ``A__`` (the mangled source
    # referenced an undefined ``convert_tf_checkpoint_to_pytorch``).
    A__(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 279 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
# Best-effort detection of a Google Colab runtime. The menu class below reads
# the name ``in_colab`` (the mangled source only ever bound ``lowercase_``,
# leaving ``in_colab`` undefined at runtime).
in_colab = False
try:
    in_colab = _is_package_available('google.colab')
except ModuleNotFoundError:
    pass
# Backward-compatible alias for the previous module-level name.
lowercase_ = in_colab
@input.register
class A_ :
    """An interactive terminal bullet-point menu.

    Arrow keys / number keys move the cursor; Enter selects and returns the
    chosen index. Restored from a mangled copy: every handler was named
    ``_snake_case`` (so later definitions shadowed earlier ones and the
    ``@input.mark`` key bindings collided) and several ``a`` references were
    dangling.
    """

    def __init__( self, prompt: str = None, choices: list = [] ):
        # NOTE(review): the mutable default ``[]`` is kept for interface
        # compatibility; this class never mutates the list.
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = '*'
        else:
            self.arrow_char = '➔ '

    def write_choice(self, index, end: str = ""):
        """Write choice ``index``, coloured green where the terminal supports it."""
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        """Print one row, prefixing the arrow when it is the current position."""
        if index == self.position:
            forceWrite(F' {self.arrow_char} ')
            self.write_choice(index)
        else:
            # Indent aligns non-selected rows with the arrow prefix.
            forceWrite(F'    {self.choices[index]}')
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        """Move the highlight up/down by ``num_spaces``, redrawing both rows."""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP['up'])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP['down'])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP['newline'])
    def select(self):
        move_cursor(len(self.choices) - self.position, 'DOWN')
        return self.position

    @input.mark(KEYMAP['interrupt'])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, 'DOWN')
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        """Jump directly to the row whose digit key was pressed."""
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        """Render the menu, loop on input, and return the selected index."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, '\n')
            if in_colab:
                forceWrite('Please input a choice index (starting from 0), and press enter', '\n')
            else:
                forceWrite('Please select a choice using the arrow or number keys, and selecting with enter', '\n')
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite('\n')
        move_cursor(len(self.choices) - self.position, 'UP')
        with cursor.hide():
            while True:
                if in_colab:
                    # Colab has no raw key input; fall back to typed indices.
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    # Erase the rendered menu (choices + prompt spacing).
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, 'UP')
                        clear_line()
                    self.write_choice(choice, '\n')
                    return choice
| 669 | 0 |
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class __UpperCamelCase (IterableDataset ):
    """Iterable dataset yielding fixed-length token tensors from a text stream.

    Examples (dicts with a ``content`` key) are buffered up to a character
    budget, tokenized in one batch, joined with the BOS token as separator,
    and sliced into ``seq_length`` chunks. The mangled source had duplicate
    ``__init__`` parameter names (a SyntaxError), bound ``[], 0`` to a single
    name, and an undefined base class — ``IterableDataset`` is imported above.
    """

    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        # Sequences are concatenated with the BOS token as separator.
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        # Rough character budget expected to produce ~num_of_sequences chunks.
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)['''content'''])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)['input_ids']
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            # Emit only full-length chunks; a trailing partial chunk is dropped.
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
def A__ ( SCREAMING_SNAKE_CASE_ ):
    """Build the evaluation ``DataLoader`` over the streamed dataset.

    ``SCREAMING_SNAKE_CASE_`` (parameter name kept for interface
    compatibility) is the parsed CLI namespace; it must provide
    ``dataset_name``, ``seq_length`` and ``batch_size``. The module-level
    ``tokenizer`` is read as in the original script.
    """
    args = SCREAMING_SNAKE_CASE_
    # Stream the split so the full dataset is never materialized in memory.
    ds_kwargs = {'streaming': True}
    train_data = load_dataset(args.dataset_name, split='''train''', **ds_kwargs)
    # ``__UpperCamelCase`` is the ConstantLengthDataset defined above; the
    # mangled source referenced it by a name that does not exist in this file.
    valid_dataset = __UpperCamelCase(tokenizer, train_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


# Name used by the script's top-level code.
create_dataloader = A__
def A__ ( SCREAMING_SNAKE_CASE_ ):
    """Evaluate the module-level ``model`` on ``eval_dataloader``.

    Returns ``(mean_loss, perplexity)`` as Python floats. Reads the
    module-level ``model``, ``accelerator`` and ``eval_dataloader`` globals,
    as the original script did; ``SCREAMING_SNAKE_CASE_`` is the parsed CLI
    namespace (parameter name kept for interface compatibility).
    """
    args = SCREAMING_SNAKE_CASE_
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        # Repeat the scalar loss so the gather below has a per-sample shape.
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        # NOTE(review): ``torch.exp`` saturates to inf rather than raising;
        # this guard is kept from the original best-effort handling.
        perplexity = float('''inf''')
    return loss.item(), perplexity.item()


# Name used by the script's top-level code.
evaluate = A__
# Setup Accelerator
# NOTE(review): this tail is machine-mangled — every result below is bound to
# the same throwaway names (``UpperCAmelCase_``), while later lines read the
# real names (``parser``, ``args``, ``model``, ``tokenizer``,
# ``eval_dataloader``, ``eval_loss``, ``perplexity``) and call
# ``create_dataloader``/``evaluate`` which this file defines as ``A__``.
# As written, the script fails with NameError at the first such read.
UpperCAmelCase_ = Accelerator()

# Parse configuration
UpperCAmelCase_ = HfArgumentParser(EvaluationArguments)
UpperCAmelCase_ = parser.parse_args()
set_seed(args.seed)

# Logging
UpperCAmelCase_ = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
UpperCAmelCase_ = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
UpperCAmelCase_ = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
UpperCAmelCase_ = create_dataloader(args)

# Prepare everything with our `accelerator`.
UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
UpperCAmelCase_ , UpperCAmelCase_ = evaluate(args)
logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    """Tests for `CMStochasticIterativeScheduler` (consistency-model sampler)."""

    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Return the default scheduler config, with keyword overrides merged in."""
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }
        config.update(**kwargs)
        return config

    def test_step_shape(self):
        """`step` must preserve the sample shape for any timestep."""
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)
        scheduler.set_timesteps(num_inference_steps)
        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]
        sample = self.dummy_sample
        residual = 0.1 * sample
        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample
        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        """Single-step denoising loop; checks deterministic output statistics."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3

    def test_full_loop_with_timesteps(self):
        """Custom descending timestep schedule; checks deterministic statistics."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        # Not strictly descending -> must raise.
        timesteps = [39, 30, 12, 15, 0]
        with self.assertRaises(ValueError, msg='`timesteps` must be in descending order.'):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg='Can only pass one of `num_inference_steps` or `timesteps`.'):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError, msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}', ):
            scheduler.set_timesteps(timesteps=timesteps)
| 669 | 0 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    """Task template mapping a dataset's columns onto the `audio-classification` task."""

    # `task` must survive `asdict` so the template round-trips through dataset infos.
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the dataset's own `ClassLabel`."""
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.')
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.')
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # Bypass the frozen dataclass to install the aligned schema on the copy.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map dataset column names to the canonical task column names."""
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
| 608 |
from datetime import datetime

import requests
from bs4 import BeautifulSoup  # fixed: the package name is `bs4`, not `bsa`

if __name__ == "__main__":
    # Download the og:image of a web page and save it with a timestamped name.
    url = input('Enter image url: ').strip()
    print(f"""Downloading image from {url} ...""")
    soup = BeautifulSoup(requests.get(url).content, 'html.parser')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('meta', {'property': 'og:image'})['content']
    image_data = requests.get(image_url).content
    file_name = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
    with open(file_name, 'wb') as fp:
        fp.write(image_data)
    print(f"""Done. Image saved to disk as {file_name}.""")
| 669 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for `ConsistencyModelPipeline` using tiny test checkpoints."""

    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    @property
    def dummy_uncond_unet(self):
        # Tiny unconditional UNet checkpoint.
        unet = UNetaDModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        # Tiny class-conditional UNet checkpoint.
        unet = UNetaDModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        """Build the minimal component dict (unet + scheduler) for the pipeline."""
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet
        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        components = {
            'unet': unet,
            'scheduler': scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs for the pipeline on `device`."""
        if str(device).startswith("mps"):
            # mps does not support device-bound generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'batch_size': 1,
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'generator': generator,
            'output_type': 'np',
        }
        return inputs

    def test_consistency_model_pipeline_multistep(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    """Slow GPU tests for `ConsistencyModelPipeline` against real checkpoints."""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        """Call kwargs for the pipeline; optionally pins `latents` for reproducibility."""
        generator = torch.manual_seed(seed)
        inputs = {
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'class_labels': 0,
            'generator': generator,
            'output_type': 'np',
        }
        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents
        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        """Deterministic latents generated on `device` with the given dtype/shape."""
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_a
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_a
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 267 |
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

# Lint repository file paths: no uppercase, spaces, hyphens, or top-level files.
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"""{len(upper_files)} files contain uppercase characters:""")
    print('\n'.join(upper_files) + '\n')

space_files = [file for file in filepaths if ' ' in file]
if space_files:
    print(f"""{len(space_files)} files contain space characters:""")
    print('\n'.join(space_files) + '\n')

hyphen_files = [file for file in filepaths if '-' in file]
if hyphen_files:
    print(f"""{len(hyphen_files)} files contain hyphen characters:""")
    print('\n'.join(hyphen_files) + '\n')

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"""{len(nodir_files)} files are not in a directory:""")
    print('\n'.join(nodir_files) + '\n')

# Non-zero exit code (the number of offending files) fails CI.
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 669 | 0 |
import pytest
import datasets
# Import fixture modules as plugins
# Import fixture modules as plugins; pytest only reads this under the reserved
# name `pytest_plugins`.
pytest_plugins = ['tests.fixtures.files', 'tests.fixtures.hub', 'tests.fixtures.fsspec']
def pytest_collection_modifyitems(config, items):
    """Pytest hook: mark every test without an `integration`/`unit` marker as a unit test."""
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)
def pytest_configure(config):
    """Pytest hook: register custom markers so `--strict-markers` accepts them."""
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    """Redirect every datasets cache location into a per-session temp directory."""
    test_hf_cache_home = tmp_path_factory.getbasetemp() / 'cache'
    test_hf_datasets_cache = test_hf_cache_home / 'datasets'
    test_hf_metrics_cache = test_hf_cache_home / 'metrics'
    test_hf_modules_cache = test_hf_cache_home / 'modules'
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / 'downloads'
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / 'downloads' / 'extracted'
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))
@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    """Silence progress bars for the whole test session."""
    datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    """Don't ping the Hub download counter during tests."""
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    """Silence SQLAlchemy 2.0 migration warnings (opt-in per test)."""
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger (transformers' logging wrapper).
logger = logging.get_logger(__name__)

# Map of pretrained checkpoint name -> hosted config URL.
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
    'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
    'xlm-roberta-large-finetuned-conll02-dutch': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
    ),
    'xlm-roberta-large-finetuned-conll02-spanish': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
    ),
    'xlm-roberta-large-finetuned-conll03-english': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
    ),
    'xlm-roberta-large-finetuned-conll03-german': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
    ),
}
class XLMRobertaConfig(PretrainedConfig):
    """Configuration for XLM-RoBERTa models; stores model hyperparameters."""

    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        # Special-token ids are handled by the base config.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaOnnxConfig(OnnxConfig):
    """ONNX export configuration for XLM-RoBERTa."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Declare the dynamic axes of the model inputs for ONNX export."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
| 669 | 0 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
# Repo/revision identifiers used by the Hub-related tests below.
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
REVISION_ID_DEFAULT = 'main'
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = 'aaaaaaa'
# This commit does not exist, so we should 404.
PINNED_SHA1 = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
@contextlib.contextmanager
def context_en():
    """Context manager printing an English greeting/goodbye around its body."""
    print('Welcome!')
    yield
    print('Bye!')
@contextlib.contextmanager
def context_fr():
    """Context manager printing a French greeting/goodbye around its body."""
    print('Bonjour!')
    yield
    print('Au revoir!')
class TestImportMechanisms(unittest.TestCase):
    """Sanity checks that `transformers` is importable via importlib machinery."""

    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec('transformers') is not None
class GenericUtilTests(unittest.TestCase):
    """Tests for `ContextManagers` and `find_labels` from transformers utils."""

    @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print('Transformers are awesome!')
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), 'Transformers are awesome!\n')

    @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print('Transformers are awesome!')
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), 'Welcome!\nTransformers are awesome!\nBye!\n')

    @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print('Transformers are awesome!')
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), 'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n')

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ['labels'])
        self.assertEqual(find_labels(BertForPreTraining), ['labels', 'next_sentence_label'])
        self.assertEqual(find_labels(BertForQuestionAnswering), ['start_positions', 'end_positions'])

        # Subclasses must inherit the label detection of their parent.
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ['labels'])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ['labels'])
        self.assertEqual(find_labels(TFBertForPreTraining), ['labels', 'next_sentence_label'])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ['start_positions', 'end_positions'])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ['labels'])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
| 53 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A_ ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = ConsistencyModelPipeline
__snake_case = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
__snake_case = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
__snake_case = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""output_type""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
@property
def _snake_case ( self: str ):
__lowerCamelCase : Tuple = UNetaDModel.from_pretrained(
'diffusers/consistency-models-test' , subfolder='test_unet' , )
return unet
@property
def _snake_case ( self: Tuple ):
__lowerCamelCase : List[str] = UNetaDModel.from_pretrained(
'diffusers/consistency-models-test' , subfolder='test_unet_class_cond' , )
return unet
def _snake_case ( self: int , a: str=False ):
if class_cond:
__lowerCamelCase : str = self.dummy_cond_unet
else:
__lowerCamelCase : str = self.dummy_uncond_unet
# Default to CM multistep sampler
__lowerCamelCase : Tuple = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
__lowerCamelCase : Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
}
return components
def _snake_case ( self: int , a: List[str] , a: Any=0 ):
if str(a ).startswith('mps' ):
__lowerCamelCase : List[Any] = torch.manual_seed(a )
else:
__lowerCamelCase : Tuple = torch.Generator(device=a ).manual_seed(a )
__lowerCamelCase : Optional[Any] = {
'batch_size': 1,
'num_inference_steps': None,
'timesteps': [22, 0],
'generator': generator,
'output_type': 'np',
}
return inputs
def _snake_case ( self: Optional[Any] ):
__lowerCamelCase : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : Dict = self.get_dummy_components()
__lowerCamelCase : str = ConsistencyModelPipeline(**a )
__lowerCamelCase : str = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : Any = self.get_dummy_inputs(a )
__lowerCamelCase : Optional[int] = pipe(**a ).images
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1]
__lowerCamelCase : str = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _snake_case ( self: Optional[int] ):
__lowerCamelCase : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : Dict = self.get_dummy_components(class_cond=a )
__lowerCamelCase : Optional[int] = ConsistencyModelPipeline(**a )
__lowerCamelCase : Any = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : Union[str, Any] = self.get_dummy_inputs(a )
__lowerCamelCase : Tuple = 0
__lowerCamelCase : List[str] = pipe(**a ).images
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase : Dict = image[0, -3:, -3:, -1]
__lowerCamelCase : Any = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _snake_case ( self: Optional[int] ):
__lowerCamelCase : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : Optional[int] = self.get_dummy_components()
__lowerCamelCase : Tuple = ConsistencyModelPipeline(**a )
__lowerCamelCase : Union[str, Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : Tuple = self.get_dummy_inputs(a )
__lowerCamelCase : str = 1
__lowerCamelCase : Optional[int] = None
__lowerCamelCase : Any = pipe(**a ).images
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase : int = image[0, -3:, -3:, -1]
__lowerCamelCase : Optional[int] = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _snake_case ( self: List[str] ):
__lowerCamelCase : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : List[Any] = self.get_dummy_components(class_cond=a )
__lowerCamelCase : Optional[Any] = ConsistencyModelPipeline(**a )
__lowerCamelCase : List[Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : List[str] = self.get_dummy_inputs(a )
__lowerCamelCase : List[str] = 1
__lowerCamelCase : List[str] = None
__lowerCamelCase : str = 0
__lowerCamelCase : Tuple = pipe(**a ).images
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase : int = image[0, -3:, -3:, -1]
__lowerCamelCase : Any = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):

    """Slow GPU integration tests for ConsistencyModelPipeline against the
    ``diffusers/consistency_models`` ImageNet-64 L2 checkpoint.

    NOTE(review): several method signatures below declare multiple parameters
    with the same mangled name ``a`` (a SyntaxError as written), and bodies
    reference ``a`` where distinct values were clearly intended; locals are
    bound to the throwaway ``__lowerCamelCase`` identifier. ``torch.floataa``
    looks like a mangled ``torch.float16`` and ``@require_torch_a`` like
    ``@require_torch_2`` -- confirm against the original test module.
    """

    def _snake_case ( self: Any ):
        # Release GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _snake_case ( self: Optional[int] , a: str=0 , a: Tuple=False , a: Tuple="cpu" , a: List[str]=torch.floataa , a: Optional[Any]=(1, 3, 64, 64) ):
        # Common pipeline kwargs: ``timesteps=[22, 0]`` selects a two-step
        # schedule, ``class_labels=0`` a fixed ImageNet class.
        __lowerCamelCase : Optional[Any] = torch.manual_seed(a )
        __lowerCamelCase : Optional[int] = {
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'class_labels': 0,
            'generator': generator,
            'output_type': 'np',
        }
        if get_fixed_latents:
            # Pre-generate latents on the target device for determinism.
            __lowerCamelCase : Dict = self.get_fixed_latents(seed=a , device=a , dtype=a , shape=a )
            __lowerCamelCase : Optional[Any] = latents
        return inputs

    def _snake_case ( self: Any , a: Any=0 , a: List[str]="cpu" , a: Optional[Any]=torch.floataa , a: int=(1, 3, 64, 64) ):
        # Deterministic latents on a fixed device/dtype.
        if type(a ) == str:
            __lowerCamelCase : Dict = torch.device(a )
        __lowerCamelCase : Union[str, Any] = torch.Generator(device=a ).manual_seed(a )
        __lowerCamelCase : str = randn_tensor(a , generator=a , device=a , dtype=a )
        return latents

    def _snake_case ( self: str ):
        # Multistep consistency-distillation sampling.
        __lowerCamelCase : Optional[int] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        __lowerCamelCase : Union[str, Any] = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCamelCase : str = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : int = self.get_inputs()
        __lowerCamelCase : Optional[int] = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
        __lowerCamelCase : Any = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2

    def _snake_case ( self: Optional[int] ):
        # Onestep (single inference step) sampling.
        __lowerCamelCase : int = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        __lowerCamelCase : int = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCamelCase : Dict = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : List[str] = self.get_inputs()
        __lowerCamelCase : Optional[Any] = 1
        __lowerCamelCase : Dict = None
        __lowerCamelCase : Union[str, Any] = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
        __lowerCamelCase : Tuple = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2

    @require_torch_a
    def _snake_case ( self: List[str] ):
        # Multistep sampling under fp16 with PyTorch 2.0 flash attention.
        __lowerCamelCase : Optional[Any] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        __lowerCamelCase : Any = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCamelCase : List[Any] = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : int = self.get_inputs(get_fixed_latents=a , device=a )
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ):
            __lowerCamelCase : int = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
        __lowerCamelCase : Optional[Any] = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    @require_torch_a
    def _snake_case ( self: Dict ):
        # Onestep sampling under fp16 with PyTorch 2.0 flash attention.
        __lowerCamelCase : Dict = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        __lowerCamelCase : Any = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCamelCase : str = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : str = self.get_inputs(get_fixed_latents=a , device=a )
        __lowerCamelCase : str = 1
        __lowerCamelCase : Union[str, Any] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ):
            __lowerCamelCase : Optional[int] = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCamelCase : str = image[0, -3:, -3:, -1]
        __lowerCamelCase : str = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 669 | 0 |
'''simple docstring'''
import qiskit
def UpperCAmelCase_ (qubits: int , classical_bits: int ):
    """Build a small circuit, flip qubits 0 and 1, measure them, and return the
    measurement histogram from the Aer simulator.

    Fix: the original declared both parameters with the same name ``__a``
    (a SyntaxError) and the body referenced an undefined identifier; the
    ``__main__`` guard also called an undefined name.

    Args:
        qubits: number of qubits in the quantum register.
        classical_bits: number of classical bits for the measurement results.

    Returns:
        dict mapping measured bitstrings to counts over 1000 shots.
    """
    simulator = qiskit.Aer.get_backend('aer_simulator' )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0 )
    circuit.x(1 )
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1] , [0, 1] )
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit , simulator , shots=1_0_0_0 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )


if __name__ == "__main__":
    counts = UpperCAmelCase_(2, 2)
    print(f'''Total count for various states are: {counts}''')
| 229 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class A_ ( __UpperCamelCase ):
    """Configuration class for a TrOCR text decoder (``model_type="trocr"``).

    Instantiating with defaults yields a configuration similar to the
    ``microsoft/trocr-base-handwritten`` checkpoint.

    Fix: the three class attributes were all assigned to one mangled name (so
    only the last survived and the PretrainedConfig machinery, which reads
    ``model_type``/``attribute_map``, could not work), and every ``__init__``
    parameter was named ``a`` -- a SyntaxError. Canonical names restored.
    """

    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map generic config attribute names onto the decoder-specific ones.
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=5_0265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.0_2,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        # Each argument maps 1:1 onto a config attribute of the same name.
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        # Special-token ids are handled by the PretrainedConfig base class.
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 669 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
_lowerCAmelCase :Dict = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_lowerCAmelCase :Tuple = {
'vocab_file': {
'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt',
},
'tokenizer_file': {
'unc-nlp/lxmert-base-uncased': (
'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'
),
},
}
_lowerCAmelCase :List[str] = {
'unc-nlp/lxmert-base-uncased': 512,
}
_lowerCAmelCase :Union[str, Any] = {
'unc-nlp/lxmert-base-uncased': {'do_lower_case': True},
}
class _UpperCAmelCase ( __UpperCamelCase ):
    r"""Fast LXMERT tokenizer (backed by the HuggingFace *tokenizers* library),
    based on WordPiece with ``[CLS]``/``[SEP]`` sentence framing.

    Fix: all five class attributes were assigned to one mangled name (only the
    last survived, hiding the vocab/config maps from the fast-tokenizer base
    class), and every method declared duplicate parameters named ``A`` -- a
    SyntaxError. Canonical names restored.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ) -> None:
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Re-configure the backend normalizer if the requested options differ
        # from what the serialized tokenizer was built with.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Frame one or two sequences with [CLS]/[SEP] special tokens."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Return segment ids: 0 for the first sequence, 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save the backend tokenizer's vocabulary files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 506 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A_ ( __UpperCamelCase ):
    """Processor combining a CLIP image processor with an XLM-Roberta tokenizer
    into a single callable for vision-and-text preprocessing.

    Fix: ``__init__`` and ``__call__`` declared duplicate parameters named
    ``a`` (a SyntaxError), the combined-encoding branch discarded the
    ``pixel_values`` assignment into a throwaway local, and the images-only
    branch built the BatchEncoding from an undefined name. Canonical names
    restored.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # Accept the deprecated `feature_extractor` kwarg as an alias for
        # `image_processor`.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`; merge both when given."""
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            # Attach the processed pixels to the text encoding.
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of both components' input names, order-preserving, deduplicated.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 669 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for the ELECTRA sub-package: each optional backend
# (tokenizers / torch / tf / flax) contributes its symbols only when
# importable; a missing backend is silently skipped.
#
# Fix: the original rebound one mangled name for every structure update, so
# each backend's symbol list clobbered the whole dict instead of adding a key,
# and the final _LazyModule was assigned to a throwaway variable instead of
# replacing this module in sys.modules.
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 179 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):

    """Fast CPU tests for the unconditional latent-diffusion (LDM) pipeline
    built from tiny dummy components.

    NOTE(review): intermediate results below are bound to the throwaway
    ``__lowerCamelCase`` identifier and ``a`` is referenced where real
    arguments were clearly intended -- as written the ``return model`` /
    ``pipe`` names are undefined; confirm against the original diffusers
    test module.
    """

    @property
    def _snake_case ( self: int ):
        # Tiny UNet so the test runs quickly.
        torch.manual_seed(0 )
        __lowerCamelCase : int = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
        return model

    @property
    def _snake_case ( self: str ):
        # Tiny VQ-VAE providing the latent space.
        torch.manual_seed(0 )
        __lowerCamelCase : Any = VQModel(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
        return model

    @property
    def _snake_case ( self: Dict ):
        # Tiny CLIP text encoder.
        torch.manual_seed(0 )
        __lowerCamelCase : Tuple = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModel(a )

    def _snake_case ( self: List[str] ):
        # End-to-end sanity check: dataclass and tuple outputs must agree and
        # match the pinned 3x3 slice.
        __lowerCamelCase : Union[str, Any] = self.dummy_uncond_unet
        __lowerCamelCase : List[str] = DDIMScheduler()
        __lowerCamelCase : str = self.dummy_vq_model
        __lowerCamelCase : Optional[int] = LDMPipeline(unet=a , vqvae=a , scheduler=a )
        ldm.to(a )
        ldm.set_progress_bar_config(disable=a )
        __lowerCamelCase : Optional[int] = torch.manual_seed(0 )
        __lowerCamelCase : Any = ldm(generator=a , num_inference_steps=2 , output_type='numpy' ).images
        __lowerCamelCase : Tuple = torch.manual_seed(0 )
        __lowerCamelCase : Dict = ldm(generator=a , num_inference_steps=2 , output_type='numpy' , return_dict=a )[0]
        __lowerCamelCase : Union[str, Any] = image[0, -3:, -3:, -1]
        __lowerCamelCase : int = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        __lowerCamelCase : Optional[int] = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] )
        # The mps backend gets a looser tolerance.
        __lowerCamelCase : str = 1e-2 if torch_device != 'mps' else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class A_ ( unittest.TestCase ):

    """Slow test: run the pretrained CompVis/ldm-celebahq-256 pipeline for a
    few steps and compare against a pinned output slice.

    NOTE(review): as in the fast tests, locals are bound to the throwaway
    ``__lowerCamelCase`` identifier and ``a`` is referenced where real values
    were intended -- confirm against the original diffusers test module.
    """

    def _snake_case ( self: Optional[int] ):
        __lowerCamelCase : int = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
        ldm.to(a )
        ldm.set_progress_bar_config(disable=a )
        __lowerCamelCase : Dict = torch.manual_seed(0 )
        __lowerCamelCase : int = ldm(generator=a , num_inference_steps=5 , output_type='numpy' ).images
        __lowerCamelCase : List[str] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        __lowerCamelCase : List[Any] = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] )
        # The mps backend gets a looser tolerance.
        __lowerCamelCase : Union[str, Any] = 1e-2 if torch_device != 'mps' else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 669 | 0 |
'''simple docstring'''
from __future__ import annotations
def snake_case_ (
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """Decrypt a Caesar cipher by minimising the chi-squared statistic.

    Every possible shift of the alphabet is tried; each candidate plaintext is
    scored against expected English letter frequencies and the lowest
    chi-squared score wins.

    Fix: the original declared all four parameters with the same mangled name
    (a SyntaxError) and bound every local to one throwaway identifier; the
    intended parameter and local names are restored.

    Args:
        ciphertext: the text to decrypt.
        cipher_alphabet: alphabet used by the cipher; defaults to a-z.
        frequencies_dict: expected letter frequencies; defaults to English.
        case_sensitive: if True, preserve letter case while shifting.

    Returns:
        (most_likely_shift, its_chi_squared_value, decoded_text).
    """
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values, keyed by candidate shift
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)
                    # Get the expected amount of times the letter should appear
                    # based on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)
                    # Get the expected amount of times the letter should appear
                    # based on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi
    # squared statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher: int = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
| 274 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
# CLI usage text shown when the script is invoked with a wrong argument count.
lowercase_ = 'Usage of script: script_name <size_of_canvas:int>'
# Seed distribution: ~9% alive cells (100 zeros, 10 ones), shuffled at import.
# NOTE(review): both constants are bound to the same mangled name `lowercase_`
# and the shuffle references the undefined name `choice` -- as written this
# raises NameError; the intended names were presumably `usage_doc` / `choice`.
lowercase_ = [0] * 1_0_0 + [1] * 1_0
random.shuffle(choice)
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
    """Return a square grid (side SCREAMING_SNAKE_CASE__) of dead cells (False).

    Fix: the original stored the grid in a throwaway local and then returned
    the undefined name ``canvas``, raising NameError on every call.
    """
    canvas = [[False for _ in range(SCREAMING_SNAKE_CASE__)] for _ in range(SCREAMING_SNAKE_CASE__)]
    return canvas
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
    """Randomly seed the canvas in place: each cell becomes a random bool.

    Fix: the original assigned the random bit to a throwaway local (so the
    canvas was never modified) and enumerated the whole canvas for the inner
    loop instead of the current row.
    """
    for i, row in enumerate(SCREAMING_SNAKE_CASE__):
        for j, _ in enumerate(row):
            SCREAMING_SNAKE_CASE__[i][j] = bool(random.getrandbits(1))
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
    """Advance the Game of Life canvas by one generation and return the new
    canvas as a list of lists.

    NOTE(review): this function calls ``create_canvas`` and ``__judge_point``,
    but every helper in this file is mangled to ``UpperCamelCase__``, and the
    intermediate results are bound to the throwaway ``__lowerCamelCase`` name
    -- as written ``next_gen_canvas`` / ``return_canvas`` are undefined and the
    calls raise NameError; confirm the intended helper names upstream.
    """
    __lowerCamelCase : str = np.array(SCREAMING_SNAKE_CASE__ )
    __lowerCamelCase : Optional[Any] = np.array(create_canvas(current_canvas.shape[0] ) )
    for r, row in enumerate(SCREAMING_SNAKE_CASE__ ):
        for c, pt in enumerate(SCREAMING_SNAKE_CASE__ ):
            # Score each cell against its 3x3 neighbourhood slice.
            __lowerCamelCase : int = __judge_point(
                SCREAMING_SNAKE_CASE__ , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
    __lowerCamelCase : Any = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    __lowerCamelCase : list[list[bool]] = current_canvas.tolist()
    return return_canvas
def UpperCamelCase__ ( pt , neighbours ):
    """Apply Conway's Game of Life rules to a single cell.

    Fix: the original declared both parameters with the same mangled name
    (a SyntaxError); the body clearly uses a (pt, neighbours) pair.

    Args:
        pt: current state of the focus cell (True = alive).
        neighbours: the 3x3 neighbourhood slice that includes the focus cell.

    Returns:
        The next state of the focus cell.
    """
    alive = 0
    dead = 0
    # finding dead or alive neighbours count (focus cell included for now).
    for row in neighbours:
        for status in row:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False  # under-population
        elif alive == 2 or alive == 3:
            state = True  # survival
        elif alive > 3:
            state = False  # over-population
    else:
        if alive == 3:
            state = True  # reproduction
    return state
if __name__ == "__main__":
    # Entry point: build a canvas of the requested size, seed it randomly, and
    # animate successive generations with matplotlib until interrupted.
    #
    # NOTE(review): `usage_doc`, `canvas_size`, `seed`, `c`, `run` and `cmap`
    # are undefined as written -- the module constants and helpers above were
    # mangled to `lowercase_` / `UpperCamelCase__`; confirm the intended names
    # against the original script.
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    lowercase_ = int(sys.argv[1])
    # main working structure of this module.
    lowercase_ = create_canvas(canvas_size)
    seed(c)
    lowercase_ ,lowercase_ = plt.subplots()
    fig.show()
    lowercase_ = ListedColormap(['w', 'k'])
    try:
        while True:
            # Advance one generation and redraw.
            lowercase_ = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
| 669 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def UpperCAmelCase ( u , p ):
    """Return the Newton forward-difference product u*(u-1)*...*(u-p+1).

    Fix: the original declared both parameters with the same mangled name
    (a SyntaxError) and the loop bound referenced an undefined identifier.

    Args:
        u: the normalised interpolation variable.
        p: number of factors (p-1 multiplications are performed).

    Returns:
        The product; equals ``u`` when ``p <= 1``.
    """
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
def UpperCAmelCase ( ):
    """Interactively read sample points and evaluate the Newton forward-
    difference interpolation polynomial at a user-supplied abscissa.

    NOTE(review): locals are bound to the throwaway ``lowerCamelCase`` name
    and several expressions reference undefined identifiers
    (``SCREAMING_SNAKE_CASE__``, ``y``, ``x``, ``n``, ``value``, ``summ``,
    ``ucal``) -- as written this function raises NameError; confirm against
    the original script. The ``__main__`` guard also calls the undefined
    name ``main``.
    """
    lowerCamelCase : Optional[int] = int(input('enter the numbers of values: '))
    lowerCamelCase : list[list[float]] = []
    for _ in range(SCREAMING_SNAKE_CASE__):
        y.append([])
    for i in range(SCREAMING_SNAKE_CASE__):
        for j in range(SCREAMING_SNAKE_CASE__):
            y[i].append(SCREAMING_SNAKE_CASE__)
    lowerCamelCase : Dict = 0
    print('enter the values of parameters in a list: ')
    lowerCamelCase : str = list(map(SCREAMING_SNAKE_CASE__ , input().split()))
    print('enter the values of corresponding parameters: ')
    for i in range(SCREAMING_SNAKE_CASE__):
        lowerCamelCase : int = float(input())
    lowerCamelCase : List[str] = int(input('enter the value to interpolate: '))
    # Normalise the target abscissa for equally spaced samples.
    lowerCamelCase : Tuple = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , SCREAMING_SNAKE_CASE__):
        for j in range(n - i):
            lowerCamelCase : List[Any] = y[j + 1][i - 1] - y[j][i - 1]
    lowerCamelCase : str = y[0][0]
    for i in range(1 , SCREAMING_SNAKE_CASE__):
        summ += (ucal(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) * y[0][i]) / math.factorial(SCREAMING_SNAKE_CASE__)
    print(F'''the value at {value} is {summ}''')


if __name__ == "__main__":
    main()
| 320 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class A_ ( __UpperCamelCase ):

    """Enumeration of the three MGP-STR decoding granularities: character,
    byte-pair (BPE) and WordPiece.

    NOTE(review): all three members were mangled to the same name
    ``__snake_case``, so only the last assignment ("wp") survives; the
    originals were presumably CHARACTER / BPE / WORDPIECE -- confirm upstream.
    """

    __snake_case = """char"""
    __snake_case = """bpe"""
    __snake_case = """wp"""


# NOTE(review): `DecodeType` is not defined in this file (the class above is
# named `A_`) -- this line raises NameError as written; confirm the intended
# class name upstream.
lowercase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class A_ ( __UpperCamelCase ):

    """MGP-STR scene-text-recognition processor: couples a ViT image processor
    with a character-level MGP-STR tokenizer, plus GPT-2 (BPE) and BERT
    (WordPiece) tokenizers used for multi-granularity decoding. The final
    prediction per sample is the granularity with the highest confidence.

    NOTE(review): ``__init__`` and ``__call__`` declare several parameters with
    the same mangled name ``a`` (a SyntaxError as written), the three class
    attributes share one name, and most intermediate results are bound to the
    throwaway ``__lowerCamelCase`` identifier -- confirm against the original
    transformers ``MgpstrProcessor``.
    """

    __snake_case = ["""image_processor""", """char_tokenizer"""]
    __snake_case = """ViTImageProcessor"""
    __snake_case = """MgpstrTokenizer"""

    def __init__( self: int , a: Dict=None , a: Optional[int]=None , **a: List[str] ):
        # Accept the deprecated `feature_extractor` kwarg as an alias for
        # `image_processor`.
        __lowerCamelCase : Optional[int] = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , a , )
            __lowerCamelCase : Optional[Any] = kwargs.pop('feature_extractor' )

        __lowerCamelCase : Any = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )

        # BPE/WordPiece decoders come from public checkpoints.
        __lowerCamelCase : Any = tokenizer
        __lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained('gpt2' )
        __lowerCamelCase : int = AutoTokenizer.from_pretrained('bert-base-uncased' )

        super().__init__(a , a )

    def __call__( self: Optional[int] , a: Optional[int]=None , a: List[Any]=None , a: int=None , **a: str ):
        # Preprocess images and/or tokenize text; when both are given, the
        # char-tokenized labels are attached to the image inputs.
        if images is None and text is None:
            raise ValueError('You need to specify either an `images` or `text` input to process.' )

        if images is not None:
            __lowerCamelCase : Dict = self.image_processor(a , return_tensors=a , **a )
        if text is not None:
            __lowerCamelCase : Dict = self.char_tokenizer(a , return_tensors=a , **a )

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            __lowerCamelCase : List[str] = encodings['input_ids']
            return inputs

    def _snake_case ( self: List[str] , a: List[Any] ):
        # Decode the (char, bpe, wp) logit triple and keep, per sample, the
        # string with the highest cumulative-probability score.
        __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = sequences
        __lowerCamelCase : List[str] = char_preds.size(0 )
        __lowerCamelCase , __lowerCamelCase : str = self._decode_helper(a , 'char' )
        __lowerCamelCase , __lowerCamelCase : Optional[int] = self._decode_helper(a , 'bpe' )
        __lowerCamelCase , __lowerCamelCase : Optional[Any] = self._decode_helper(a , 'wp' )
        __lowerCamelCase : Tuple = []
        __lowerCamelCase : List[Any] = []
        for i in range(a ):
            __lowerCamelCase : List[Any] = [char_scores[i], bpe_scores[i], wp_scores[i]]
            __lowerCamelCase : Optional[int] = [char_strs[i], bpe_strs[i], wp_strs[i]]
            __lowerCamelCase : Any = scores.index(max(a ) )
            final_strs.append(strs[max_score_index] )
            final_scores.append(scores[max_score_index] )

        # Output dict keeps the winning text plus each granularity's decode.
        __lowerCamelCase : List[str] = {}
        __lowerCamelCase : Optional[int] = final_strs
        __lowerCamelCase : Dict = final_scores
        __lowerCamelCase : Dict = char_strs
        __lowerCamelCase : List[Any] = bpe_strs
        __lowerCamelCase : Tuple = wp_strs
        return out

    def _snake_case ( self: int , a: Optional[int] , a: Optional[Any] ):
        # Select the decoder, end-of-sequence token id and string for the
        # requested granularity, then greedy-decode and score each sequence.
        if format == DecodeType.CHARACTER:
            __lowerCamelCase : Optional[Any] = self.char_decode
            __lowerCamelCase : Union[str, Any] = 1
            __lowerCamelCase : List[str] = '[s]'
        elif format == DecodeType.BPE:
            __lowerCamelCase : Dict = self.bpe_decode
            __lowerCamelCase : List[str] = 2
            __lowerCamelCase : Any = '#'
        elif format == DecodeType.WORDPIECE:
            __lowerCamelCase : List[str] = self.wp_decode
            __lowerCamelCase : int = 102
            __lowerCamelCase : Dict = '[SEP]'
        else:
            raise ValueError(F'Format {format} is not supported.' )

        __lowerCamelCase , __lowerCamelCase : int = [], []
        __lowerCamelCase : Tuple = pred_logits.size(0 )
        __lowerCamelCase : List[Any] = pred_logits.size(1 )
        # Greedy (top-1) token choice per position; drop the BOS position.
        __lowerCamelCase , __lowerCamelCase : Dict = pred_logits.topk(1 , dim=-1 , largest=a , sorted=a )
        __lowerCamelCase : List[str] = preds_index.view(-1 , a )[:, 1:]
        __lowerCamelCase : Dict = decoder(a )
        __lowerCamelCase , __lowerCamelCase : Optional[Any] = torch.nn.functional.softmax(a , dim=2 ).max(dim=2 )
        __lowerCamelCase : List[str] = preds_max_prob[:, 1:]

        for index in range(a ):
            # Truncate at the first EOS token and score by the cumulative
            # product of per-token max probabilities.
            __lowerCamelCase : str = preds_str[index].find(a )
            __lowerCamelCase : Tuple = preds_str[index][:pred_eos]
            __lowerCamelCase : Any = preds_index[index].cpu().tolist()
            __lowerCamelCase : Any = pred_index.index(a ) if eos_token in pred_index else -1
            __lowerCamelCase : str = preds_max_prob[index][: pred_eos_index + 1]
            __lowerCamelCase : Union[str, Any] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(a )
            conf_scores.append(a )
        return dec_strs, conf_scores

    def _snake_case ( self: Tuple , a: Optional[int] ):
        # Character-level decode; spaces are artifacts of batch_decode joins.
        __lowerCamelCase : Dict = [seq.replace(' ' , '' ) for seq in self.char_tokenizer.batch_decode(a )]
        return decode_strs

    def _snake_case ( self: Optional[int] , a: Tuple ):
        # BPE decode via the GPT-2 tokenizer.
        return self.bpe_tokenizer.batch_decode(a )

    def _snake_case ( self: Optional[int] , a: List[Any] ):
        # WordPiece decode via the BERT tokenizer.
        __lowerCamelCase : int = [seq.replace(' ' , '' ) for seq in self.wp_tokenizer.batch_decode(a )]
        return decode_strs
| 669 | 0 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class SCREAMING_SNAKE_CASE_ ( __UpperCamelCase ):
    """Abstract base class for CLI subcommands: concrete subcommands register
    their argument parser and implement an execution entry point.

    NOTE(review): both abstract methods share the mangled name
    ``lowerCamelCase__``, so the second definition silently shadows the first
    in the class namespace; the originals were presumably
    ``register_subcommand`` and ``run`` -- confirm upstream.
    """

    @staticmethod
    @abstractmethod
    def lowerCamelCase__ ( lowerCAmelCase : ArgumentParser ) -> Union[str, Any]:
        """Register this subcommand's arguments on the given parser (abstract)."""
        raise NotImplementedError()

    @abstractmethod
    def lowerCamelCase__ ( self : List[str] ) -> List[Any]:
        """Execute the subcommand (abstract)."""
        raise NotImplementedError()
| 279 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
lowercase_ = {name: getattr(transformers, name + 'Fast') for name in SLOW_TO_FAST_CONVERTERS}
def UpperCamelCase__ ( tokenizer_name , checkpoint_name , dump_path , force_download ):
    """Convert slow tokenizer checkpoints to the fast (tokenizers-backed)
    format and keep only the generated ``tokenizer.json`` files.

    Args:
        tokenizer_name: name of one tokenizer class to convert, or None for
            every class in ``TOKENIZER_CLASSES``.
        checkpoint_name: one checkpoint to convert, or None for every canonical
            checkpoint of the class.
        dump_path: output directory for the generated files.
        force_download: re-download checkpoints instead of using the cache.

    Raises:
        ValueError: if ``tokenizer_name`` is not a known tokenizer class.

    NOTE(review): the original repeated one mangled parameter name four times
    (a SyntaxError) while the body read the real names restored here.
    """
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.' )
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers , tokenizer_name + 'Fast' )}
    logger.info(f'Loading tokenizer classes: {tokenizer_names}' )
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            # Convert every canonical checkpoint known to this tokenizer class.
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys() )
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(f'For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}' )
        for checkpoint in checkpoint_names:
            logger.info(f'Loading {tokenizer_class.__class__.__name__} {checkpoint}' )
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint , force_download=force_download )
            # Save fast tokenizer
            logger.info(f'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}' )
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory , checkpoint_prefix_name = checkpoint.split('/' )
                dump_path_full = os.path.join(dump_path , checkpoint_directory )
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(f'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
                next_char = file_path.split(checkpoint )[-1][0]
                if next_char == "/":
                    # The vocab file lives under a checkpoint-named sub-folder.
                    dump_path_full = os.path.join(dump_path_full , checkpoint_prefix_name )
                    checkpoint_prefix_name = None
                logger.info(f'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
            file_names = tokenizer.save_pretrained(
                dump_path_full , legacy_format=False , filename_prefix=checkpoint_prefix_name )
            logger.info(f'=> File names {file_names}' )
            # Keep only the fast tokenizer.json artifacts.
            for file_name in file_names:
                if not file_name.endswith('tokenizer.json' ):
                    os.remove(file_name )
                    logger.info(f'=> removing {file_name}' )
if __name__ == "__main__":
    # NOTE(review): the original bound the parser and parsed args to one mangled
    # throwaway name while reading `parser` / `args` (NameError); restored here.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--dump_path', default=None, type=str, required=True, help='Path to output generated fast tokenizer files.'
    )
    parser.add_argument(
        '--tokenizer_name',
        default=None,
        type=str,
        help=(
            F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
            'download and convert all the checkpoints from AWS.'
        ),
    )
    parser.add_argument(
        '--checkpoint_name',
        default=None,
        type=str,
        help='Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.',
    )
    parser.add_argument(
        '--force_download',
        action='store_true',
        help='Re-download checkpoints.',
    )
    args = parser.parse_args()
    # NOTE(review): the conversion routine above carries a mangled name in this
    # file; this call uses the intended public name — confirm the definition.
    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 669 | 0 |
from __future__ import annotations
import math
def A__ ( number ):
    """Return True if *number* is prime, using 6k +/- 1 trial division.

    Numbers below 2 are not prime; even numbers and multiples of 3 are rejected
    early, then only candidates of the form 6k +/- 1 up to sqrt(number) are tried.

    NOTE(review): the original mixed two mangled parameter spellings, so the
    body raised NameError; the parameter is restored to the name the body uses.
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def A__ ( n ):
    """Return *n* together with every right- and left-truncation of its digits.

    Example: 123 -> [123, 23, 12, 3, 1].

    NOTE(review): local names restored — the original never defined `n` or
    `str_num`, which the body read (NameError).
    """
    str_num = str(n )
    list_nums = [n]
    for i in range(1 , len(str_num ) ):
        # Drop i leading digits, then i trailing digits.
        list_nums.append(int(str_num[i:] ) )
        list_nums.append(int(str_num[:-i] ) )
    return list_nums
def A__ ( n ):
    """Cheap pre-filter for truncatable-prime candidates.

    For numbers longer than three digits, require that the leading and trailing
    three digits each already form a prime; shorter numbers always pass.

    NOTE(review): relies on the sibling `is_prime` helper, whose definition is
    mangled to `A__` in this file — confirm the intended name.
    """
    if len(str(n ) ) > 3:
        if not is_prime(int(str(n )[-3:] ) ) or not is_prime(int(str(n )[:3] ) ):
            return False
    return True
def A__ ( count = 11 ):
    """Collect the first *count* truncatable primes greater than 10.

    A truncatable prime stays prime as digits are successively removed from
    either end; the search starts at 13 and tests odd candidates only.

    NOTE(review): the original never defined `count` or the accumulator names
    its body read; restored here. Depends on the sibling helpers `validate`,
    `list_truncated_nums` and `is_prime`, all mangled to `A__` in this file.
    """
    list_truncated_primes = []
    num = 13
    while len(list_truncated_primes ) != count:
        if validate(num ):
            list_nums = list_truncated_nums(num )
            if all(is_prime(i ) for i in list_nums ):
                list_truncated_primes.append(num )
        num += 2
    return list_truncated_primes
def A__ ( ) -> List[Any]:
    """Project Euler 37 answer: sum of the eleven truncatable primes.

    NOTE(review): calls `compute_truncated_primes`, but the definition above is
    mangled to `A__` in this file — confirm the sibling's intended name before
    running.
    """
    return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
    # Self-documenting f-string: prints "sum(compute_truncated_primes(11)) = <answer>".
    # NOTE(review): `compute_truncated_primes` is mangled to `A__` above — confirm.
    print(f'''{sum(compute_truncated_primes(11)) = }''')
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowercase_ = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class A_ ( __UpperCamelCase , unittest.TestCase ):
    """Test-suite for the Pegasus slow/fast tokenizer pair.

    NOTE(review): this class is heavily name-mangled — the four class
    attributes share one name (only the last assignment survives; presumably
    tokenizer_class / rust_tokenizer_class / test_rust_tokenizer /
    test_sentencepiece), every method shares the name `_snake_case` (later
    defs shadow earlier ones), and locals are bound to `__lowerCamelCase`
    while later lines read the intended names (e.g. `tokenizer`,
    `vocab_keys`). The `PegasusTokenizer(a )` call in setUp also references
    an undefined name (presumably a sample-vocab fixture path). Confirm all
    names against the original file before running.
    """

    __snake_case = PegasusTokenizer
    __snake_case = PegasusTokenizerFast
    __snake_case = True
    __snake_case = True

    def _snake_case ( self: List[str] ):
        # setUp: build a tokenizer from the SentencePiece fixture and persist it.
        super().setUp()
        # We have a SentencePiece fixture for testing
        __lowerCamelCase : List[str] = PegasusTokenizer(a )
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def _snake_case ( self: List[Any] ):
        # Large reference tokenizer (network download, hence cached).
        return PegasusTokenizer.from_pretrained('google/pegasus-large' )

    def _snake_case ( self: Tuple , **a: List[Any] ):
        # Build a tokenizer from the temp dir written by setUp.
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **a )

    def _snake_case ( self: List[Any] , a: int ):
        # Input/output pair used by the common tokenizer tests.
        return ("This is a test", "This is a test")

    def _snake_case ( self: Any ):
        # Round-trip a special token through id conversion.
        __lowerCamelCase : Dict = '</s>'
        __lowerCamelCase : List[str] = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a )

    def _snake_case ( self: Optional[Any] ):
        # Spot-check the fixture vocabulary ordering and size.
        __lowerCamelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<pad>' )
        self.assertEqual(vocab_keys[1] , '</s>' )
        self.assertEqual(vocab_keys[-1] , 'v' )
        self.assertEqual(len(a ) , 1103 )

    def _snake_case ( self: Tuple ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1103 )

    def _snake_case ( self: Dict ):
        # Slow and fast tokenizers must produce identical ids on special tokens.
        __lowerCamelCase : Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        __lowerCamelCase : List[str] = self.tokenizer_class.from_pretrained(self.tmpdirname )
        __lowerCamelCase : Tuple = (
            'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
            ' </s> <pad> <pad> <pad>'
        )
        __lowerCamelCase : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=a , add_special_tokens=a ).input_ids[0]
        __lowerCamelCase : str = py_tokenizer([raw_input_str] , return_tensors=a , add_special_tokens=a ).input_ids[0]
        self.assertListEqual(a , a )

    def _snake_case ( self: int ):
        __lowerCamelCase : Union[str, Any] = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        __lowerCamelCase : Tuple = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
        __lowerCamelCase : Optional[Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
        __lowerCamelCase : Optional[Any] = tokenizer([raw_input_str] , return_tensors=a ).input_ids[0]
        self.assertListEqual(a , a )

    def _snake_case ( self: Dict ):
        # Check the fixed vocabulary layout of the large checkpoint.
        __lowerCamelCase : Any = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 9_6103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        __lowerCamelCase : int = 'To ensure a smooth flow of bank resolutions.'
        __lowerCamelCase : Union[str, Any] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
        __lowerCamelCase : List[str] = tokenizer([raw_input_str] , return_tensors=a ).input_ids[0]
        self.assertListEqual(a , a )
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def _snake_case ( self: str ):
        # Padding/truncation to the model max length and target tokenization.
        __lowerCamelCase : List[str] = ['This is going to be way too long.' * 150, 'short example']
        __lowerCamelCase : Tuple = ['not super long but more than 5 tokens', 'tiny']
        __lowerCamelCase : Union[str, Any] = self._large_tokenizer(a , padding=a , truncation=a , return_tensors='pt' )
        __lowerCamelCase : List[str] = self._large_tokenizer(
            text_target=a , max_length=5 , padding=a , truncation=a , return_tensors='pt' )
        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(a ) == 2 # input_ids, attention_mask.

    @slow
    def _snake_case ( self: List[str] ):
        # Full integration check against a pinned revision's expected encoding.
        # fmt: off
        __lowerCamelCase : Tuple = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=a , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class A_ ( __UpperCamelCase , unittest.TestCase ):
    """Test-suite for the BigBird-Pegasus tokenizer variant (offset 0, [MASK]
    mask token, no mask_token_sent).

    NOTE(review): same mangling as the class above — class attributes and
    methods share single names (later definitions shadow earlier ones) and
    locals are bound to `__lowerCamelCase` while later lines read the intended
    names. Confirm names against the original file before running.
    """

    __snake_case = PegasusTokenizer
    __snake_case = PegasusTokenizerFast
    __snake_case = True
    __snake_case = True

    def _snake_case ( self: str ):
        # setUp: build the variant tokenizer from the fixture and persist it.
        super().setUp()
        # We have a SentencePiece fixture for testing
        __lowerCamelCase : str = PegasusTokenizer(a , offset=0 , mask_token_sent=a , mask_token='[MASK]' )
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def _snake_case ( self: List[str] ):
        # Large reference tokenizer for this variant (network download, cached).
        return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )

    def _snake_case ( self: Union[str, Any] , **a: Dict ):
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **a )

    def _snake_case ( self: List[str] , a: Any ):
        # Input/output pair used by the common tokenizer tests.
        return ("This is a test", "This is a test")

    def _snake_case ( self: Any ):
        # Slow and fast tokenizers must agree on special-token handling.
        __lowerCamelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        __lowerCamelCase : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
        __lowerCamelCase : Tuple = (
            'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
            ' <pad> <pad> <pad>'
        )
        __lowerCamelCase : int = rust_tokenizer([raw_input_str] , return_tensors=a , add_special_tokens=a ).input_ids[0]
        __lowerCamelCase : str = py_tokenizer([raw_input_str] , return_tensors=a , add_special_tokens=a ).input_ids[0]
        self.assertListEqual(a , a )

    @require_torch
    def _snake_case ( self: Union[str, Any] ):
        # Padding/truncation to the 4096-token BigBird context window.
        __lowerCamelCase : Union[str, Any] = ['This is going to be way too long.' * 1000, 'short example']
        __lowerCamelCase : Tuple = ['not super long but more than 5 tokens', 'tiny']
        __lowerCamelCase : str = self._large_tokenizer(a , padding=a , truncation=a , return_tensors='pt' )
        __lowerCamelCase : Any = self._large_tokenizer(
            text_target=a , max_length=5 , padding=a , truncation=a , return_tensors='pt' )
        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(a ) == 2 # input_ids, attention_mask.

    def _snake_case ( self: Any ):
        # Pinned ids: parity with the original TF implementation.
        __lowerCamelCase : int = (
            'This is an example string that is used to test the original TF implementation against the HF'
            ' implementation'
        )
        __lowerCamelCase : Dict = self._large_tokenizer(a ).input_ids
        self.assertListEqual(
            a , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 669 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase = {
"configuration_mobilebert": [
"MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileBertConfig",
"MobileBertOnnxConfig",
],
"tokenization_mobilebert": ["MobileBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileBertForMaskedLM",
"MobileBertForMultipleChoice",
"MobileBertForNextSentencePrediction",
"MobileBertForPreTraining",
"MobileBertForQuestionAnswering",
"MobileBertForSequenceClassification",
"MobileBertForTokenClassification",
"MobileBertLayer",
"MobileBertModel",
"MobileBertPreTrainedModel",
"load_tf_weights_in_mobilebert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileBertForMaskedLM",
"TFMobileBertForMultipleChoice",
"TFMobileBertForNextSentencePrediction",
"TFMobileBertForPreTraining",
"TFMobileBertForQuestionAnswering",
"TFMobileBertForSequenceClassification",
"TFMobileBertForTokenClassification",
"TFMobileBertMainLayer",
"TFMobileBertModel",
"TFMobileBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 608 |
def UpperCamelCase__ ( density , bulk_modulus ):
    """Return the speed of sound in a fluid: sqrt(bulk_modulus / density).

    Raises:
        ValueError: if density or bulk_modulus is not strictly positive.

    NOTE(review): the original repeated one mangled parameter name twice (a
    SyntaxError) while the body read `density` and `bulk_modulus`.
    """
    if density <= 0:
        raise ValueError('Impossible fluid density' )
    if bulk_modulus <= 0:
        raise ValueError('Impossible bulk modulus' )
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
    import doctest

    # Run any doctests embedded in this module's docstrings.
    doctest.testmod()
| 669 | 0 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class snake_case (__UpperCamelCase ):
    """Processor combining a BLIP image processor, a language tokenizer and a
    Q-Former tokenizer (InstructBLIP-style).

    NOTE(review): restored from heavily mangled source — the original collapsed
    the three ProcessorMixin class attributes into one name (only the last
    assignment survived), repeated a single mangled parameter name throughout
    `__init__` and `__call__` (a SyntaxError), and gave every method the same
    name so later definitions shadowed earlier ones. Canonical names restored;
    confirm against the original file.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self , image_processor , tokenizer , qformer_tokenizer ):
        super().__init__(image_processor , tokenizer )
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self ,
        images = None ,
        text = None ,
        add_special_tokens = True ,
        padding = False ,
        truncation = None ,
        max_length = None ,
        stride = 0 ,
        pad_to_multiple_of = None ,
        return_attention_mask = None ,
        return_overflowing_tokens = False ,
        return_special_tokens_mask = False ,
        return_offsets_mapping = False ,
        return_token_type_ids = False ,
        return_length = False ,
        verbose = True ,
        return_tensors = None ,
        **kwargs ,
    ):
        """Tokenize *text* with both tokenizers and preprocess *images*.

        Returns one BatchFeature carrying the regular tokenizer/image fields
        plus `qformer_input_ids` / `qformer_attention_mask`.

        Raises:
            ValueError: if neither images nor text is given.
        """
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text." )
        encoding = BatchFeature()
        if text is not None:
            text_encoding = self.tokenizer(
                text=text ,
                add_special_tokens=add_special_tokens ,
                padding=padding ,
                truncation=truncation ,
                max_length=max_length ,
                stride=stride ,
                pad_to_multiple_of=pad_to_multiple_of ,
                return_attention_mask=return_attention_mask ,
                return_overflowing_tokens=return_overflowing_tokens ,
                return_special_tokens_mask=return_special_tokens_mask ,
                return_offsets_mapping=return_offsets_mapping ,
                return_token_type_ids=return_token_type_ids ,
                return_length=return_length ,
                verbose=verbose ,
                return_tensors=return_tensors ,
                **kwargs ,
            )
            encoding.update(text_encoding )
            qformer_text_encoding = self.qformer_tokenizer(
                text=text ,
                add_special_tokens=add_special_tokens ,
                padding=padding ,
                truncation=truncation ,
                max_length=max_length ,
                stride=stride ,
                pad_to_multiple_of=pad_to_multiple_of ,
                return_attention_mask=return_attention_mask ,
                return_overflowing_tokens=return_overflowing_tokens ,
                return_special_tokens_mask=return_special_tokens_mask ,
                return_offsets_mapping=return_offsets_mapping ,
                return_token_type_ids=return_token_type_ids ,
                return_length=return_length ,
                verbose=verbose ,
                return_tensors=return_tensors ,
                **kwargs ,
            )
            # Rename the Q-Former fields so they don't clash with the main ones.
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids" )
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask" )
        if images is not None:
            image_encoding = self.image_processor(images , return_tensors=return_tensors )
            encoding.update(image_encoding )
        return encoding

    def batch_decode(self , *args , **kwargs ):
        """Forward to the language tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode(self , *args , **kwargs ):
        """Forward to the language tokenizer's decode."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        # Preserve order, drop duplicates.
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    def save_pretrained(self , save_directory , **kwargs ):
        """Save the processor, plus the Q-Former tokenizer in a sub-folder."""
        if os.path.isfile(save_directory ):
            raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
        os.makedirs(save_directory , exist_ok=True )
        qformer_tokenizer_path = os.path.join(save_directory , "qformer_tokenizer" )
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path )
        return super().save_pretrained(save_directory , **kwargs )

    @classmethod
    def from_pretrained(cls , pretrained_model_name_or_path , **kwargs ):
        """Load the processor components, including the Q-Former tokenizer
        stored under the `qformer_tokenizer` sub-folder."""
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path , subfolder="qformer_tokenizer" )
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path , **kwargs )
        args.append(qformer_tokenizer )
        return cls(*args )
| 267 |
def UpperCamelCase__ ( n ):
    """Return the number of divisors of *n* via prime factorization:
    d(n) = prod(multiplicity_i + 1) over the prime factors of n.

    NOTE(review): local names restored — the original bound every local to one
    mangled name and then read `i`, `n_divisors` and `multiplicity` (NameError).
    """
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        # Whatever remains is a prime factor with multiplicity 1.
        n_divisors *= 2
    return n_divisors
def UpperCamelCase__ ( ):
    """Project Euler 12: first triangular number with more than 500 divisors.

    NOTE(review): relies on the divisor counter defined above, which this file
    mangles to the same name as this function — the call below uses the
    intended `count_divisors`; confirm the sibling's name.
    """
    i = 1
    t_num = 1
    while True:
        # Next triangular number: 1, 3, 6, 10, ...
        i += 1
        t_num += i
        if count_divisors(t_num ) > 500:
            break
    return t_num
if __name__ == "__main__":
    # NOTE(review): `solution` is mangled to `UpperCamelCase__` above — confirm
    # the intended function name before running.
    print(solution())
| 669 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Lazy import table: submodule name -> public names it provides; extended
# below for each backend that is actually installed.
# NOTE(review): the original bound every intermediate (and the final lazy
# module) to a mangled throwaway name while referencing `_import_structure`,
# which was never defined; the canonical names and the `sys.modules` swap are
# restored here.
_import_structure = {'configuration_mbart': ['MBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MBartConfig', 'MBartOnnxConfig']}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_mbart'] = ['MBartTokenizer']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_mbart_fast'] = ['MBartTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mbart'] = [
        'MBART_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MBartForCausalLM',
        'MBartForConditionalGeneration',
        'MBartForQuestionAnswering',
        'MBartForSequenceClassification',
        'MBartModel',
        'MBartPreTrainedModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_mbart'] = [
        'TFMBartForConditionalGeneration',
        'TFMBartModel',
        'TFMBartPreTrainedModel',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_mbart'] = [
        'FlaxMBartForConditionalGeneration',
        'FlaxMBartForQuestionAnswering',
        'FlaxMBartForSequenceClassification',
        'FlaxMBartModel',
        'FlaxMBartPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Eager imports for type checkers only.
    from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart import MBartTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart_fast import MBartTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mbart import (
            MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            MBartForCausalLM,
            MBartForConditionalGeneration,
            MBartForQuestionAnswering,
            MBartForSequenceClassification,
            MBartModel,
            MBartPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mbart import (
            FlaxMBartForConditionalGeneration,
            FlaxMBartForQuestionAnswering,
            FlaxMBartForSequenceClassification,
            FlaxMBartModel,
            FlaxMBartPreTrainedModel,
        )
else:
    import sys

    # Replace this module with the lazy proxy so attribute access triggers
    # on-demand submodule imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import numpy as np
class A_ :
    """A grid cell for A* search: position, parent back-pointer and the
    g (cost-so-far), h (heuristic) and f (total) costs.

    NOTE(review): the original collapsed all attribute assignments into one
    mangled name and compared against an undefined `cell` parameter; the names
    the rest of this file reads are restored.
    """

    def __init__( self ):
        self.position = (0, 0)  # (x, y) grid coordinates
        self.parent = None      # back-pointer used to reconstruct the path
        self.g = 0              # cost from the start cell
        self.h = 0              # heuristic cost to the goal
        self.f = 0              # total cost g + h

    def __eq__( self , cell ):
        """Cells compare equal when they occupy the same position."""
        return self.position == cell.position

    def _snake_case ( self ):
        # Debug helper: print this cell's position.
        print(self.position )
class A_ :
    """Rectangular grid world (a NumPy array of zeros) used as the A* search
    space.

    NOTE(review): the original gave both methods the same mangled name (the
    neighbour generator shadowed the display helper) and collapsed the
    attribute assignments; restored so `astar` can call `get_neigbours`.
    """

    def __init__( self , a=(5, 5) ):
        # `a` is the (width, height) of the world.
        self.w = np.zeros(a )
        self.world_x_limit = a[0]
        self.world_y_limit = a[1]

    def show( self ):
        """Print the raw grid array."""
        print(self.w )

    def get_neigbours ( self , cell ):
        """Return the (up to 8) in-bounds neighbours of *cell*, each a fresh
        Cell whose parent is set to *cell*.

        NOTE(review): `Cell` is the sibling class above, mangled to `A_` in
        this file — confirm the intended class name.
        """
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                neighbour = Cell()
                neighbour.position = (x, y)
                neighbour.parent = cell
                neighbours.append(neighbour )
        return neighbours
def UpperCamelCase__ ( world , start , goal ):
    """A* search from *start* to *goal* on *world*; returns the path as a list
    of positions from start to goal (inclusive).

    NOTE(review): restored from mangled source — the original repeated one
    parameter name three times (a SyntaxError) and collapsed all locals. The
    inner `for c in ...: if c == n: continue` membership loops mirror the
    original upstream code; the `continue` only skips within that inner loop,
    so they do not actually filter — kept to preserve behavior.
    """
    _open = []
    _closed = []
    _open.append(start )
    while _open:
        # Expand the open cell with the smallest total cost f.
        min_f = np.argmin([n.f for n in _open] )
        current = _open[min_f]
        _closed.append(_open.pop(min_f ) )
        if current == goal:
            break
        for n in world.get_neigbours(current ):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1 , y1 = n.position
            x2 , y2 = goal.position
            # Squared Euclidean distance heuristic.
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n )
    # Walk parent pointers back from the goal to rebuild the path.
    path = []
    while current.parent is not None:
        path.append(current.position )
        current = current.parent
    path.append(current.position )
    return path[::-1]
if __name__ == "__main__":
    # NOTE(review): every binding below is mangled to the single name
    # `lowercase_` (presumably world / start / goal / s in the original), so
    # the later reads of `start`, `goal`, `world` and `s` fail — this script
    # cannot run as-is; confirm the intended names.
    lowercase_ = Gridworld()
    # Start position and goal
    lowercase_ = Cell()
    lowercase_ = (0, 0)
    lowercase_ = Cell()
    lowercase_ = (4, 4)
    print(F"""path from {start.position} to {goal.position}""")
    lowercase_ = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        lowercase_ = 1
    print(world.w)
| 669 | 0 |
def a_ ( n_term ):
    """Return the first terms of the harmonic series as strings:
    ['1', '1/2', '1/3', ...] up to 1/n. An empty string input yields [].

    NOTE(review): the parameter is restored to the single name the body reads;
    the original mixed two mangled spellings (NameError).
    """
    if n_term == "":
        return []
    series = []
    for temp in range(int(n_term ) ):
        # First term renders as '1' rather than '1/1'.
        series.append(F"""1/{temp + 1}""" if series else '1' )
    return series
if __name__ == "__main__":
    # NOTE(review): the function above is mangled to `a_`, and the names
    # `harmonic_series` / `nth_term` read below are never bound (the input is
    # assigned to `_snake_case`) — confirm the intended names before running.
    _snake_case : Optional[int] = input('Enter the last number (nth term) of the Harmonic Series')
    print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
    print(harmonic_series(nth_term))
| 53 |
import math
from datetime import datetime, timedelta
def UpperCamelCase__ ( year ):
    """Gauss's Easter algorithm: return the date of Easter Sunday for *year*
    as a datetime.

    NOTE(review): locals restored from the mangled single-name bindings the
    original used while reading the real names (NameError). The arithmetic —
    including the float division `leap_day_inhibits / 4` — mirrors the
    upstream implementation exactly.
    """
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100 )
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25 )
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        # Exception rule 1 of the Gauss algorithm.
        return datetime(year , 4 , 19 )
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        # Exception rule 2 of the Gauss algorithm.
        return datetime(year , 4 , 18 )
    else:
        return datetime(year , 3 , 22 ) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
lowercase_ = 'will be' if year > datetime.now().year else 'was'
print(F"""Easter in {year} {tense} {gauss_easter(year)}""")
| 669 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase__ ( metaclass=__UpperCamelCase ):
    """Placeholder object used when the `speech` backend is not installed;
    instantiation delegates to `requires_backends`, which reports the missing
    dependency."""

    # Backends this dummy stands in for.
    __UpperCAmelCase : str = ['''speech''']

    def __init__( self : List[str] ,*_a : Any ,**_a : Dict ):
        """Reject construction via the backend-availability check."""
        requires_backends(self ,['speech'] )
class UpperCAmelCase__ ( metaclass=__UpperCamelCase ):
    """Second placeholder for the `speech` backend; same contract as the dummy
    class above — construction delegates to `requires_backends`."""

    # Backends this dummy stands in for.
    __UpperCAmelCase : Union[str, Any] = ['''speech''']

    def __init__( self : Optional[int] ,*_a : Tuple ,**_a : List[str] ):
        """Reject construction via the backend-availability check."""
        requires_backends(self ,['speech'] )
| 229 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class A_ ( __UpperCamelCase , __UpperCamelCase ):
    """Variance-preserving (VP) SDE scheduler for score-based generative
    models; implements the Euler–Maruyama predictor step of the reverse-time
    SDE.

    NOTE(review): restored from mangled source — the original repeated one
    parameter name four times in `__init__` (a SyntaxError) and gave both
    instance methods the same name, so the timestep setup was shadowed by the
    step method. Canonical names restored; confirm against the original file.
    """

    # Scheduler order (number of model evaluations per step).
    order = 1

    @register_to_config
    def __init__( self , num_train_timesteps=2000 , beta_min=0.1 , beta_max=20 , sampling_eps=1e-3 ):
        # Config values are stored on self.config by @register_to_config;
        # the fields below are populated by set_timesteps().
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps( self , num_inference_steps , device: Union[str, torch.device] = None ):
        """Create the descending continuous timestep grid from 1 down to
        config.sampling_eps."""
        self.timesteps = torch.linspace(1 , self.config.sampling_eps , num_inference_steps , device=device )

    def step_pred( self , score , x , t , generator=None ):
        """One predictor step of the reverse-time VP SDE.

        Args:
            score: model score estimate for sample `x` at time `t`.
            x: current sample tensor.
            t: continuous timestep tensor.
            generator: optional RNG for the injected noise.

        Returns:
            Tuple `(x, x_mean)`: the noised next sample and its noise-free mean.

        Raises:
            ValueError: if set_timesteps() has not been called.
        """
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.2_5 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        std = std.flatten()
        # Broadcast std over the trailing sample dimensions.
        while len(std.shape ) < len(score.shape ):
            std = std.unsqueeze(-1 )
        score = -score / std
        # compute
        dt = -1.0 / len(self.timesteps )
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            beta_t = beta_t.unsqueeze(-1 )
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t )
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape , layout=x.layout , generator=generator , device=x.device , dtype=x.dtype )
        x = x_mean + diffusion * math.sqrt(-dt ) * noise
        return x, x_mean

    def __len__( self ):
        return self.config.num_train_timesteps
| 669 | 0 |
"""simple docstring"""
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
# Path to a small SentencePiece model fixture (with byte-fallback) used by the tests below.
_lowerCAmelCase :Union[str, Any] = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( __UpperCamelCase ,unittest.TestCase ):
    """Tokenizer test-suite for ``GPTSwaTokenizer`` (GPT-SW3, SentencePiece with byte fallback).

    NOTE(review): this class looks machine-garbled. ``A`` is referenced but
    never defined at module scope (presumably the fixture-path constant
    defined above); every method is named ``__lowerCAmelCase`` so earlier
    definitions are shadowed; and many results are bound to the throwaway
    name ``_UpperCAmelCase`` while later lines read the intended names
    (``tokenizer``, ``vocab_keys``, ``ids`` ...). Verify against the upstream
    GPT-SW3 tokenizer test before relying on behavior documented here.
    """

    # Test-mixin configuration flags (names garbled; all four were distinct
    # class attributes upstream — confirm).
    a__ =GPTSwaTokenizer
    a__ =False
    a__ =True
    a__ =False

    # setUp: build a tokenizer from the SentencePiece fixture and save it to
    # the temporary directory used by the common tokenizer tests.
    def __lowerCAmelCase ( self ) -> str:
        super().setUp()
        # We have a SentencePiece fixture for testing
        _UpperCAmelCase : Any = GPTSwaTokenizer(A , eos_token='''<unk>''' , bos_token='''<unk>''' , pad_token='''<unk>''' )
        tokenizer.save_pretrained(self.tmpdirname )

    # get_input_output_texts: identity pair used by the common round-trip tests.
    def __lowerCAmelCase ( self , A ) -> Union[str, Any]:
        _UpperCAmelCase : int = 'This is a test'
        _UpperCAmelCase : List[Any] = 'This is a test'
        return input_text, output_text

    # token <-> id conversion for a single special token.
    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        _UpperCAmelCase : Union[str, Any] = '<s>'
        _UpperCAmelCase : Optional[Any] = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )

    # vocab sanity: first/last entries and total size of the fixture model.
    def __lowerCAmelCase ( self ) -> int:
        _UpperCAmelCase : Dict = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''<unk>''' )
        self.assertEqual(vocab_keys[1] , '''<s>''' )
        self.assertEqual(vocab_keys[-1] , '''j''' )
        self.assertEqual(len(A ) , 2_0_0_0 )

    def __lowerCAmelCase ( self ) -> Optional[Any]:
        self.assertEqual(self.get_tokenizer().vocab_size , 2_0_0_0 )

    # full tokenizer: tokenization, ids, and byte-fallback pieces (<0x..>).
    def __lowerCAmelCase ( self ) -> Optional[Any]:
        _UpperCAmelCase : List[str] = GPTSwaTokenizer(A )
        _UpperCAmelCase : Any = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2] )
        _UpperCAmelCase : Union[str, Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        # fmt: off
        self.assertListEqual(
            A , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] , )
        # fmt: on
        _UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A )
        self.assertListEqual(
            A , [2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0] , )
        _UpperCAmelCase : str = tokenizer.convert_ids_to_tokens(A )
        # fmt: off
        self.assertListEqual(
            A , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] )
        # fmt: on

    # encode_fast / decode_fast agree with tokenize + convert_tokens_to_ids.
    def __lowerCAmelCase ( self ) -> int:
        _UpperCAmelCase : int = GPTSwaTokenizer(A )
        _UpperCAmelCase : Optional[Any] = ['This is a test', 'I was born in 92000, and this is falsé.']
        _UpperCAmelCase : Optional[Any] = [
            [4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2],
            [2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0],
        ]
        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(A , A ):
            self.assertListEqual(tokenizer.encode_fast(A ) , A )
        # Test that decode_fast returns the input text
        for text, token_ids in zip(A , A ):
            self.assertEqual(tokenizer.decode_fast(A ) , A )

    # Slow integration test against the published AI-Sweden checkpoint.
    @slow
    def __lowerCAmelCase ( self ) -> Optional[int]:
        _UpperCAmelCase : Optional[int] = [
            '<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
            'Hey there, how are you doing this fine day?',
            'This is a text with a trailing spaces followed by a dot .',
            'Häj sväjs lillebrör! =)',
            'Det är inget fel på Mr. Cool',
        ]
        # fmt: off
        _UpperCAmelCase : Union[str, Any] = {'input_ids': [[6_3_4_2_3, 5, 6_8_1_1, 1_4_9_5_4, 2_8_2, 8_1_6, 3_8_2_1, 6_3_4_6_6, 6_3_4_2_5, 6_3_4_6_2, 1_8, 6_3_9_7_8, 6_7_8, 3_0_1, 1_3_2_0, 6_3_4_2_3, 6_3_4_5_5, 6_3_4_5_8, 1_8, 6_3_9_8_2, 4_2_4_6, 3_9_4_0, 1_9_0_1, 4_7_7_8_9, 5_5_4_7, 1_8_9_9_4], [1_9_6_3_0, 1_1_0_0, 6_3_4_4_6, 1_3_4_2, 6_3_3, 5_4_4, 4_4_8_8, 5_9_3, 5_1_0_2, 2_4_1_6, 6_3_4_9_5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_6_5_2, 4_2_8, 2_6_8, 1_9_3_6, 5_1_5, 2_6_8, 5_8_5_9_3, 2_2_4_1_3, 9_1_0_6, 5_4_6, 2_6_8, 3_3_2_1_3, 6_3_9_7_9, 6_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5_1_3_0, 6_3_4_5_0, 9_2_4, 6_3_4_4_9, 2_2_4_9, 4_0_6_2, 1_5_5_8, 3_1_8, 6_3_5_0_4, 2_1_4_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_0_9, 3_7_7, 2_8_2_7, 2_5_5_9, 3_3_2, 6_5_7_5, 6_3_4_4_3, 2_6_8_0_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=A , model_name='''AI-Sweden/gpt-sw3-126m''' , sequences=A , )
| 506 |
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
    """Return the first ``n`` Hamming numbers (numbers of the form 2^i * 3^j * 5^k).

    Args:
        SCREAMING_SNAKE_CASE__: how many terms to generate (coerced with ``int``).

    Returns:
        list[int]: the first ``n`` Hamming numbers in increasing order, starting at 1.

    Raises:
        ValueError: if the requested count is smaller than 1.
    """
    n_element = int(SCREAMING_SNAKE_CASE__ )
    if n_element < 1:
        # fail fast on a non-positive request (message kept from the original)
        raise ValueError('a should be a positive number' )

    hamming_list = [1]
    # i, j, k index the next elements whose multiple by 2, 3 and 5
    # (respectively) has not yet been appended
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        # skip candidates that are not larger than the current maximum, then
        # append the smallest candidate among the three streams
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
        index += 1
    return hamming_list
if __name__ == "__main__":
    # CLI driver: ask for the number of terms and print the series.
    # (The original referenced the undefined names `hamming`, `n` and
    # `hamming_numbers`; the generator above is `UpperCamelCase__`.)
    n = input('Enter the last number (nth term) of the Hamming Number Series: ')
    print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
    hamming_numbers = UpperCamelCase__(int(n))
    print('-----------------------------------------------------')
    print(F"""The list with nth numbers is: {hamming_numbers}""")
    print('-----------------------------------------------------')
| 669 | 0 |
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def lowerCAmelCase_ ( class_info_file , repo_path="shi-labs/oneformer_demo" ):
    '''Download an id->info class JSON from the Hub and build segmentation metadata.

    (The original did not compile: both parameters shared one name, and the
    body read a third, undefined name. Reconstructed per the upstream
    `prepare_metadata` helper.)

    Args:
        class_info_file: JSON file name inside the dataset repo; maps class id
            to a dict with at least ``name`` and ``isthing``.
        repo_path: Hub dataset repository to download from.

    Returns:
        dict with ``thing_ids`` (ids whose ``isthing`` flag is truthy, as ints)
        and ``class_names`` (names in file order).
    '''
    with open(hf_hub_download(repo_path , class_info_file , repo_type='''dataset''' ) , '''r''' ) as f:
        class_info = json.load(f )
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        class_names.append(info['''name'''] )
        if info["isthing"]:
            # keys are the class ids (as strings in JSON) — convert to int
            thing_ids.append(int(key ) )
    metadata['thing_ids'] = thing_ids
    metadata['class_names'] = class_names
    return metadata
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """Configuration holder and shape helper for OneFormerImageProcessor tests.

    NOTE(review): the previous version did not compile (all sixteen
    ``__init__`` parameters shared the name ``__lowercase``) and bound every
    ``self.<attr>`` assignment and tuple unpacking to throwaway locals.
    Reconstructed from the upstream tester — confirm attribute names against
    the sibling test class.
    """

    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , size=None , do_resize=True , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , num_labels=10 , do_reduce_labels=False , ignore_index=255 , repo_path="shi-labs/oneformer_demo" , class_info_file="ade20k_panoptic.json" , num_text=10 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {'shortest_edge': 32, 'longest_edge': 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        # `lowerCAmelCase_` is this file's (garbled) prepare_metadata helper
        self.metadata = lowerCAmelCase_(class_info_file , repo_path )
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4

        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index

    def prepare_image_processor_dict( self ):
        """Kwargs used to instantiate the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "num_labels": self.num_labels,
            "do_reduce_labels": self.do_reduce_labels,
            "ignore_index": self.ignore_index,
            "class_info_file": self.class_info_file,
            "metadata": self.metadata,
            "num_text": self.num_text,
        }

    def get_expected_values( self , image_inputs , batched=False ):
        """Compute the (height, width) produced by the shortest-edge resize.

        For a batch, returns the per-batch maximum height and width (the
        padded output size).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['''shortest_edge'''] * h / w )
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['''shortest_edge'''] * w / h )
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width

    def get_fake_oneformer_outputs( self ):
        """Random model output with the right shapes for post-processing tests."""
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( __UpperCamelCase , unittest.TestCase ):
    """Test-suite for OneFormerImageProcessor: attribute presence, PIL/numpy/torch
    batching, segmentation-map encoding, RLE conversion and post-processing.

    NOTE(review): several names look machine-garbled: the base
    ``__UpperCamelCase`` and ``OneFormerImageProcessorTester`` (used in the
    setUp below) are undefined in this file; many results are bound to the
    throwaway name ``__lowerCamelCase`` while later lines read the intended
    names (``image_processor``, ``encoded_images``, ``rle``,
    ``fature_extractor`` ...); and one method below has duplicate parameter
    names, which is a SyntaxError. Confirm against the upstream test file.
    """

    __snake_case : Dict = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    __snake_case : List[Any] = image_processing_class

    # setUp (garbled name): build the config/tester helper.
    def __lowercase ( self :int ):
        __lowerCamelCase : Tuple =OneFormerImageProcessorTester(self )

    # image_processor_dict property used to instantiate the processor.
    @property
    def __lowercase ( self :int ):
        return self.image_processing_tester.prepare_image_processor_dict()

    # test_image_processor_properties: every expected attribute exists.
    def __lowercase ( self :Optional[int] ):
        __lowerCamelCase : List[Any] =self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__lowercase , '''image_mean''' ) )
        self.assertTrue(hasattr(__lowercase , '''image_std''' ) )
        self.assertTrue(hasattr(__lowercase , '''do_normalize''' ) )
        self.assertTrue(hasattr(__lowercase , '''do_resize''' ) )
        self.assertTrue(hasattr(__lowercase , '''size''' ) )
        self.assertTrue(hasattr(__lowercase , '''ignore_index''' ) )
        self.assertTrue(hasattr(__lowercase , '''class_info_file''' ) )
        self.assertTrue(hasattr(__lowercase , '''num_text''' ) )
        self.assertTrue(hasattr(__lowercase , '''repo_path''' ) )
        self.assertTrue(hasattr(__lowercase , '''metadata''' ) )
        self.assertTrue(hasattr(__lowercase , '''do_reduce_labels''' ) )

    def __lowercase ( self :int ):
        pass

    # PIL inputs: single image and batch produce the expected pixel_values shape.
    def __lowercase ( self :Any ):
        # Initialize image_processor
        __lowerCamelCase : Dict =self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        __lowerCamelCase : Optional[Any] =prepare_image_inputs(self.image_processing_tester , equal_resolution=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase , Image.Image )
        # Test not batched input
        __lowerCamelCase : Optional[Any] =image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
        __lowerCamelCase : List[str] =self.image_processing_tester.get_expected_values(__lowercase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        __lowerCamelCase : List[str] =self.image_processing_tester.get_expected_values(__lowercase , batched=__lowercase )
        __lowerCamelCase : List[str] =image_processor(
            __lowercase , ['''semantic'''] * len(__lowercase ) , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    # numpy inputs: same shape checks as the PIL variant above.
    def __lowercase ( self :int ):
        # Initialize image_processor
        __lowerCamelCase : str =self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        __lowerCamelCase : Optional[Any] =prepare_image_inputs(self.image_processing_tester , equal_resolution=__lowercase , numpify=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase , np.ndarray )
        # Test not batched input
        __lowerCamelCase : Tuple =image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
        __lowerCamelCase : List[Any] =self.image_processing_tester.get_expected_values(__lowercase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        __lowerCamelCase : Optional[int] =self.image_processing_tester.get_expected_values(__lowercase , batched=__lowercase )
        __lowerCamelCase : Tuple =image_processor(
            __lowercase , ['''semantic'''] * len(__lowercase ) , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    # torch inputs: same shape checks as the PIL variant above.
    def __lowercase ( self :Optional[int] ):
        # Initialize image_processor
        __lowerCamelCase : Tuple =self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        __lowerCamelCase : Tuple =prepare_image_inputs(self.image_processing_tester , equal_resolution=__lowercase , torchify=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase , torch.Tensor )
        # Test not batched input
        __lowerCamelCase : Any =image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
        __lowerCamelCase : Optional[Any] =self.image_processing_tester.get_expected_values(__lowercase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        __lowerCamelCase : Tuple =self.image_processing_tester.get_expected_values(__lowercase , batched=__lowercase )
        __lowerCamelCase : str =image_processor(
            __lowercase , ['''semantic'''] * len(__lowercase ) , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    # Build processor inputs with optional segmentation maps / instance maps.
    # NOTE(review): duplicate parameter names below are a SyntaxError as
    # written — upstream these were distinct (with_segmentation_maps,
    # is_instance_map, segmentation_type); confirm.
    def __lowercase ( self :str , __lowercase :Optional[Any]=False , __lowercase :Any=False , __lowercase :Tuple="np" ):
        __lowerCamelCase : Tuple =self.image_processing_class(**self.image_processor_dict )
        # prepare image and target
        __lowerCamelCase : Optional[Any] =self.image_processing_tester.num_labels
        __lowerCamelCase : str =None
        __lowerCamelCase : Dict =None
        __lowerCamelCase : Dict =prepare_image_inputs(self.image_processing_tester , equal_resolution=__lowercase )
        if with_segmentation_maps:
            __lowerCamelCase : List[str] =num_labels
            if is_instance_map:
                __lowerCamelCase : str =list(range(__lowercase ) ) * 2
                __lowerCamelCase : Optional[int] =dict(enumerate(__lowercase ) )
            __lowerCamelCase : str =[
                np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
            ]
            if segmentation_type == "pil":
                __lowerCamelCase : Dict =[Image.fromarray(__lowercase ) for annotation in annotations]
        __lowerCamelCase : Dict =image_processor(
            __lowercase , ['''semantic'''] * len(__lowercase ) , __lowercase , return_tensors='''pt''' , instance_id_to_semantic_id=__lowercase , pad_and_return_pixel_mask=__lowercase , )
        return inputs

    def __lowercase ( self :str ):
        pass

    # test_call_with_segmentation_maps: labels are padded and batch-consistent.
    def __lowercase ( self :int ):
        def common(__lowercase :str=False , __lowercase :Union[str, Any]=None ):
            __lowerCamelCase : Optional[Any] =self.comm_get_image_processor_inputs(
                with_segmentation_maps=__lowercase , is_instance_map=__lowercase , segmentation_type=__lowercase )
            __lowerCamelCase : int =inputs['mask_labels']
            __lowerCamelCase : Any =inputs['class_labels']
            __lowerCamelCase : Optional[Any] =inputs['pixel_values']
            __lowerCamelCase : str =inputs['text_inputs']
            # check the batch_size
            for mask_label, class_label, text_input in zip(__lowercase , __lowercase , __lowercase ):
                self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
                self.assertEqual(len(__lowercase ) , self.image_processing_tester.num_text )
        common()
        common(is_instance_map=__lowercase )
        common(is_instance_map=__lowercase , segmentation_type='''pil''' )
        common(is_instance_map=__lowercase , segmentation_type='''pil''' )

    # test_binary_mask_to_rle: run-length encoding of a simple binary mask.
    def __lowercase ( self :Any ):
        __lowerCamelCase : Any =np.zeros((20, 50) )
        __lowerCamelCase : int =1
        __lowerCamelCase : List[Any] =1
        __lowerCamelCase : Optional[Any] =1
        __lowerCamelCase : Optional[Any] =binary_mask_to_rle(__lowercase )
        self.assertEqual(len(__lowercase ) , 4 )
        self.assertEqual(rle[0] , 21 )
        self.assertEqual(rle[1] , 45 )

    # test_post_process_semantic_segmentation: output sizes with/without target_sizes.
    def __lowercase ( self :str ):
        __lowerCamelCase : Tuple =self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
        __lowerCamelCase : Any =self.image_processing_tester.get_fake_oneformer_outputs()
        # NOTE(review): read back as `fature_extractor` (sic) below, but the
        # assignment target was garbled — confirm the intended local name.
        __lowerCamelCase : Tuple =fature_extractor.post_process_semantic_segmentation(__lowercase )
        self.assertEqual(len(__lowercase ) , self.image_processing_tester.batch_size )
        self.assertEqual(
            segmentation[0].shape , (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ) , )
        __lowerCamelCase : Optional[Any] =[(1, 4) for i in range(self.image_processing_tester.batch_size )]
        __lowerCamelCase : Union[str, Any] =fature_extractor.post_process_semantic_segmentation(__lowercase , target_sizes=__lowercase )
        self.assertEqual(segmentation[0].shape , target_sizes[0] )

    # test_post_process_instance_segmentation: per-image dict structure.
    def __lowercase ( self :Dict ):
        __lowerCamelCase : Optional[Any] =self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
        __lowerCamelCase : str =self.image_processing_tester.get_fake_oneformer_outputs()
        __lowerCamelCase : Dict =image_processor.post_process_instance_segmentation(__lowercase , threshold=0 )
        self.assertTrue(len(__lowercase ) == self.image_processing_tester.batch_size )
        for el in segmentation:
            self.assertTrue('''segmentation''' in el )
            self.assertTrue('''segments_info''' in el )
            self.assertEqual(type(el['''segments_info'''] ) , __lowercase )
            self.assertEqual(
                el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )

    # test_post_process_panoptic_segmentation: per-image dict structure.
    def __lowercase ( self :List[Any] ):
        __lowerCamelCase : List[str] =self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
        __lowerCamelCase : str =self.image_processing_tester.get_fake_oneformer_outputs()
        __lowerCamelCase : Tuple =image_processor.post_process_panoptic_segmentation(__lowercase , threshold=0 )
        self.assertTrue(len(__lowercase ) == self.image_processing_tester.batch_size )
        for el in segmentation:
            self.assertTrue('''segmentation''' in el )
            self.assertTrue('''segments_info''' in el )
            self.assertEqual(type(el['''segments_info'''] ) , __lowercase )
            self.assertEqual(
                el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 179 |
import unittest
from knapsack import greedy_knapsack as kp
class A_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self: List[Any] ):
__lowerCamelCase : str = [10, 20, 30, 40, 50, 60]
__lowerCamelCase : List[str] = [2, 4, 6, 8, 10, 12]
__lowerCamelCase : Tuple = 100
self.assertEqual(kp.calc_profit(a , a , a ) , 210 )
def _snake_case ( self: str ):
self.assertRaisesRegex(a , 'max_weight must greater than zero.' )
def _snake_case ( self: List[str] ):
self.assertRaisesRegex(a , 'Weight can not be negative.' )
def _snake_case ( self: Dict ):
self.assertRaisesRegex(a , 'Profit can not be negative.' )
def _snake_case ( self: List[str] ):
self.assertRaisesRegex(a , 'max_weight must greater than zero.' )
def _snake_case ( self: Any ):
self.assertRaisesRegex(
a , 'The length of profit and weight must be same.' )
if __name__ == "__main__":
unittest.main()
| 669 | 0 |
'''simple docstring'''
def snake_case_ ( __snake_case : int = 50 ) -> int:
    """Project Euler 116: count the ways to replace tiles in a row of the given
    length with coloured tiles of length 2 (red), 3 (green) or 4 (blue),
    using at least one coloured tile and never mixing colours.

    Args:
        __snake_case: length of the row (default 50, the problem's input).

    Returns:
        Total number of tilings, summed over the three colours.
    """
    length = __snake_case
    # ways[r][c] = number of non-empty tilings of a row of length r using
    # only tiles of size c + 2 (c = 0, 1, 2 for red, green, blue)
    ways = [[0] * 3 for _ in range(length + 1 )]
    for row_length in range(length + 1 ):
        for tile_length in range(2 , 5 ):
            for tile_start in range(row_length - tile_length + 1 ):
                # place one tile at `tile_start`, then either stop (+1) or
                # continue tiling the remainder of the row
                ways[row_length][tile_length - 2] += (
                    ways[row_length - tile_start - tile_length][tile_length - 2]
                    + 1
                )
    return sum(ways[length] )
if __name__ == "__main__":
    # The solver above is `snake_case_` (the original referenced the
    # undefined name `solution`).
    print(f'''{snake_case_() = }''')
| 274 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A_ :
    """Model tester for TF LayoutLMv3: builds configs/inputs and checks output
    shapes for the base model and the QA / sequence- / token-classification heads.

    NOTE(review): this class looks machine-garbled: ``__init__`` repeats the
    parameter name ``a`` (a SyntaxError as written — upstream these were
    distinct names like ``batch_size``, ``num_channels``, ...), the
    ``_snake_case`` methods also repeat ``a``, and many results are bound to
    the throwaway name ``__lowerCamelCase`` while later lines read the
    intended names. Confirm against the upstream TFLayoutLMv3ModelTester.
    """

    def __init__( self: Optional[int] , a: Union[str, Any] , a: Union[str, Any]=2 , a: str=3 , a: Any=4 , a: Union[str, Any]=2 , a: Tuple=7 , a: int=True , a: Tuple=True , a: List[str]=True , a: Union[str, Any]=True , a: str=99 , a: Tuple=36 , a: int=2 , a: Dict=4 , a: Union[str, Any]=37 , a: List[str]="gelu" , a: List[Any]=0.1 , a: Optional[int]=0.1 , a: Dict=512 , a: Union[str, Any]=16 , a: str=2 , a: int=0.0_2 , a: Optional[Any]=6 , a: Optional[int]=6 , a: Dict=3 , a: Optional[Any]=4 , a: Optional[Any]=None , a: Dict=1000 , ):
        __lowerCamelCase : List[str] = parent
        __lowerCamelCase : Optional[Any] = batch_size
        __lowerCamelCase : Optional[int] = num_channels
        __lowerCamelCase : str = image_size
        __lowerCamelCase : int = patch_size
        __lowerCamelCase : List[str] = is_training
        __lowerCamelCase : Dict = use_input_mask
        __lowerCamelCase : Any = use_token_type_ids
        __lowerCamelCase : List[str] = use_labels
        __lowerCamelCase : str = vocab_size
        __lowerCamelCase : List[Any] = hidden_size
        __lowerCamelCase : List[Any] = num_hidden_layers
        __lowerCamelCase : Any = num_attention_heads
        __lowerCamelCase : List[Any] = intermediate_size
        __lowerCamelCase : List[Any] = hidden_act
        __lowerCamelCase : Any = hidden_dropout_prob
        __lowerCamelCase : Optional[int] = attention_probs_dropout_prob
        __lowerCamelCase : Dict = max_position_embeddings
        __lowerCamelCase : Tuple = type_vocab_size
        __lowerCamelCase : int = type_sequence_label_size
        __lowerCamelCase : List[str] = initializer_range
        __lowerCamelCase : List[str] = coordinate_size
        __lowerCamelCase : int = shape_size
        __lowerCamelCase : Union[str, Any] = num_labels
        __lowerCamelCase : int = num_choices
        __lowerCamelCase : int = scope
        __lowerCamelCase : Any = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        __lowerCamelCase : Any = text_seq_length
        __lowerCamelCase : Optional[Any] = (image_size // patch_size) ** 2 + 1
        __lowerCamelCase : Any = self.text_seq_length + self.image_seq_length

    # prepare_config_and_inputs: random ids/bboxes/pixel values plus a config.
    def _snake_case ( self: List[str] ):
        __lowerCamelCase : Any = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        __lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        __lowerCamelCase : int = bbox.numpy()
        # Ensure that bbox is legal
        # (swap coordinates so x0 <= x1 and y0 <= y1 for every box)
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    __lowerCamelCase : List[str] = bbox[i, j, 3]
                    __lowerCamelCase : str = bbox[i, j, 1]
                    __lowerCamelCase : Dict = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    __lowerCamelCase : Tuple = bbox[i, j, 2]
                    __lowerCamelCase : Any = bbox[i, j, 0]
                    __lowerCamelCase : List[str] = tmp_coordinate
        __lowerCamelCase : str = tf.constant(a )
        __lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __lowerCamelCase : Any = None
        if self.use_input_mask:
            __lowerCamelCase : int = random_attention_mask([self.batch_size, self.text_seq_length] )
        __lowerCamelCase : Tuple = None
        if self.use_token_type_ids:
            __lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        __lowerCamelCase : Dict = None
        __lowerCamelCase : Union[str, Any] = None
        if self.use_labels:
            __lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __lowerCamelCase : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        __lowerCamelCase : Dict = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    # create_and_check_model: base model with text+image, text-only and image-only inputs.
    def _snake_case ( self: Tuple , a: List[Any] , a: Any , a: List[str] , a: Dict , a: Optional[Any] , a: Dict ):
        __lowerCamelCase : Optional[Any] = TFLayoutLMvaModel(config=a )
        # text + image
        __lowerCamelCase : Optional[Any] = model(a , pixel_values=a , training=a )
        __lowerCamelCase : int = model(
            a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , training=a , )
        __lowerCamelCase : List[Any] = model(a , bbox=a , pixel_values=a , training=a )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        __lowerCamelCase : List[Any] = model(a , training=a )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        __lowerCamelCase : Optional[Any] = model({'pixel_values': pixel_values} , training=a )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )

    # create_and_check_for_sequence_classification: logits are (batch, num_labels).
    def _snake_case ( self: Dict , a: Dict , a: Optional[Any] , a: int , a: Optional[int] , a: List[str] , a: List[str] , a: List[str] ):
        __lowerCamelCase : List[str] = self.num_labels
        __lowerCamelCase : str = TFLayoutLMvaForSequenceClassification(config=a )
        __lowerCamelCase : int = model(
            a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , labels=a , training=a , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    # create_and_check_for_token_classification: logits are (batch, text_seq_length, num_labels).
    def _snake_case ( self: Optional[int] , a: Union[str, Any] , a: Union[str, Any] , a: Dict , a: Optional[Any] , a: Tuple , a: Optional[Any] , a: List[Any] ):
        __lowerCamelCase : Union[str, Any] = self.num_labels
        __lowerCamelCase : Any = TFLayoutLMvaForTokenClassification(config=a )
        __lowerCamelCase : Optional[Any] = model(
            a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , labels=a , training=a , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )

    # create_and_check_for_question_answering: start/end logits are (batch, seq_length).
    def _snake_case ( self: Dict , a: Optional[Any] , a: str , a: Dict , a: Union[str, Any] , a: List[Any] , a: Optional[int] , a: List[str] ):
        __lowerCamelCase : List[Any] = 2
        __lowerCamelCase : Any = TFLayoutLMvaForQuestionAnswering(config=a )
        __lowerCamelCase : Any = model(
            a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , start_positions=a , end_positions=a , training=a , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    # prepare_config_and_inputs_for_common: repackage inputs as the kwargs dict
    # expected by the shared TF model tests.
    def _snake_case ( self: List[Any] ):
        __lowerCamelCase : str = self.prepare_config_and_inputs()
        ((__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase)) : List[Any] = config_and_inputs
        __lowerCamelCase : Tuple = {
            'input_ids': input_ids,
            'bbox': bbox,
            'pixel_values': pixel_values,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_tf
class A_ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
    """Common test-suite class for the TF LayoutLMv3 model family.

    NOTE(review): this file is machine-obfuscated — all six class attributes below
    are assigned to the same name ``__snake_case`` (originally the distinct
    ``all_model_classes`` / ``pipeline_model_mapping`` / test flags) and method
    locals are all rebound to ``__lowerCamelCase``, so later reads such as
    ``model_class``, ``inputs_dict``, ``prepared_for_class``, ``loss`` and the
    annotated tuple unpacks reference names that are never defined here.  The
    original identifiers must be restored before these tests can run.
    """
    __snake_case = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    __snake_case = (
        {"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    __snake_case = False
    __snake_case = False
    __snake_case = False
    def _snake_case ( self: int , a: List[str] , a: Any , a: Optional[Any] , a: Tuple , a: Tuple ):
        # Unconditionally reports True; the original method name (some capability
        # predicate used by the common tests) was lost in the rename.
        return True
    def _snake_case ( self: str , a: Any , a: Any , a: Optional[int]=False ):
        # Deep-copies the inputs dict, tiles tensors for multiple-choice style
        # models, and (when return_labels) injects dummy label tensors per head.
        __lowerCamelCase : List[str] = copy.deepcopy(a )
        if model_class in get_values(a ):
            __lowerCamelCase : Tuple = {
                k: tf.tile(tf.expand_dims(a , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
                if isinstance(a , tf.Tensor ) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(a ):
                __lowerCamelCase : Any = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(a ):
                __lowerCamelCase : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
                __lowerCamelCase : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(a ):
                __lowerCamelCase : str = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(a ):
                __lowerCamelCase : Dict = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
        return inputs_dict
    def _snake_case ( self: Tuple ):
        # setUp: build the shared model tester and config tester.
        __lowerCamelCase : int = TFLayoutLMvaModelTester(self )
        __lowerCamelCase : str = ConfigTester(self , config_class=a , hidden_size=37 )
    def _snake_case ( self: Union[str, Any] ):
        # Delegates to ConfigTester's standard config checks.
        self.config_tester.run_common_tests()
    def _snake_case ( self: Union[str, Any] ):
        # Loss-computation test: the loss must have the expected shape whether the
        # inputs are passed as kwargs, a dict, or a positional tuple.
        __lowerCamelCase , __lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowerCamelCase : int = model_class(a )
            if getattr(a , 'hf_compute_loss' , a ):
                # The number of elements in the loss should be the same as the number of elements in the label
                __lowerCamelCase : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                __lowerCamelCase : int = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=a )[0]
                ]
                __lowerCamelCase : Dict = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                __lowerCamelCase : Optional[int] = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                __lowerCamelCase : Dict = prepared_for_class.pop('input_ids' )
                __lowerCamelCase : str = model(a , **a )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss when we mask some positions
                __lowerCamelCase : List[Any] = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                __lowerCamelCase : List[str] = prepared_for_class.pop('input_ids' )
                if "labels" in prepared_for_class:
                    __lowerCamelCase : int = prepared_for_class['labels'].numpy()
                    if len(labels.shape ) > 1 and labels.shape[1] != 1:
                        __lowerCamelCase : Tuple = -100
                        __lowerCamelCase : Tuple = tf.convert_to_tensor(a )
                        __lowerCamelCase : Tuple = model(a , **a )[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                        self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
                # Test that model correctly compute the loss with a dict
                __lowerCamelCase : int = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                __lowerCamelCase : str = model(a )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss with a tuple
                __lowerCamelCase : str = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                # Get keys that were added with the _prepare_for_class function
                __lowerCamelCase : Optional[Any] = prepared_for_class.keys() - inputs_dict.keys()
                __lowerCamelCase : List[Any] = inspect.signature(model.call ).parameters
                __lowerCamelCase : List[str] = list(signature.keys() )
                # Create a dictionary holding the location of the tensors in the tuple
                __lowerCamelCase : Optional[int] = {0: 'input_ids'}
                for label_key in label_keys:
                    __lowerCamelCase : Dict = signature_names.index(a )
                    __lowerCamelCase : str = label_key
                __lowerCamelCase : List[str] = sorted(tuple_index_mapping.items() )
                # Initialize a list with their default values, update the values and convert to a tuple
                __lowerCamelCase : Optional[int] = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default )
                for index, value in sorted_tuple_index_mapping:
                    __lowerCamelCase : Optional[int] = prepared_for_class[value]
                __lowerCamelCase : Any = tuple(a )
                # Send to model
                __lowerCamelCase : int = model(tuple_input[:-1] )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
    def _snake_case ( self: List[str] ):
        # create_and_check_model over a fresh set of prepared inputs.
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(a , a , a , a , a , a )
    def _snake_case ( self: int ):
        # Same model check repeated for each position-embedding type.
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : str = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            __lowerCamelCase : Union[str, Any] = type
            self.model_tester.create_and_check_model(a , a , a , a , a , a )
    def _snake_case ( self: Dict ):
        # Sequence-classification head check.
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            a , a , a , a , a , a , a )
    def _snake_case ( self: str ):
        # Token-classification head check.
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            a , a , a , a , a , a , a )
    def _snake_case ( self: str ):
        # Question-answering head check.
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            a , a , a , a , a , a , a )
    @slow
    def _snake_case ( self: int ):
        # Smoke-test loading the first pretrained checkpoint from the hub.
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowerCamelCase : Dict = TFLayoutLMvaModel.from_pretrained(a )
            self.assertIsNotNone(a )
def UpperCamelCase__ ( ):
    """Load and return the COCO fixture image used by the slow integration test.

    Fixes the mangled original, which bound the opened image to a throwaway
    name (``__lowerCamelCase``) and then returned the undefined name ``image``.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_tf
class A_ ( unittest.TestCase ):
    """Slow integration test: run microsoft/layoutlmv3-base on a fixture image.

    NOTE(review): locals are all rebound to ``__lowerCamelCase`` and several calls
    pass the undefined name ``a`` — restore the original identifiers before use.
    """
    @cached_property
    def _snake_case ( self: Optional[int] ):
        # Image processor for the integration test; `a` is undefined here
        # (originally presumably `apply_ocr=False` — TODO confirm).
        return LayoutLMvaImageProcessor(apply_ocr=a ) if is_vision_available() else None
    @slow
    def _snake_case ( self: Optional[Any] ):
        # Forward pass on the base checkpoint; verify last_hidden_state shape
        # (1, 199, 768) and a 3x3 slice of expected values.
        __lowerCamelCase : Tuple = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )
        __lowerCamelCase : Union[str, Any] = self.default_image_processor
        __lowerCamelCase : List[Any] = prepare_img()
        __lowerCamelCase : str = image_processor(images=a , return_tensors='tf' ).pixel_values
        __lowerCamelCase : Union[str, Any] = tf.constant([[1, 2]] )
        __lowerCamelCase : str = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
        # forward pass
        __lowerCamelCase : int = model(input_ids=a , bbox=a , pixel_values=a , training=a )
        # verify the logits
        __lowerCamelCase : Optional[int] = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape , a )
        __lowerCamelCase : Any = tf.constant(
            [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , a , atol=1e-4 ) )
| 669 | 0 |
'''simple docstring'''
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def UpperCAmelCase ( UpperCAmelCase__ : List[str]):
    """Load a T5X checkpoint from ``UpperCAmelCase__`` and return its params as a flat dict.

    Fixes the mangled original, which bound both intermediate results to a
    throwaway name and then called with / returned undefined names
    (``SCREAMING_SNAKE_CASE__``, ``flax_params``).
    """
    flax_params = checkpoints.load_tax_checkpoint(UpperCAmelCase__)
    flax_params = flatten_dict(flax_params)
    return flax_params
def UpperCAmelCase ( UpperCAmelCase__ : List[Any]):
lowerCamelCase : Optional[Any] = {}
lowerCamelCase : Tuple = {
'token_embedder': 'embeddings',
'encoder_norm': 'layernorm',
'kernel': 'weight',
'.out': '.output',
'scale': 'weight',
'embedders_0.pos_embedding': 'row_embedder.weight',
'embedders_1.pos_embedding': 'column_embedder.weight',
}
lowerCamelCase : Any = {
'query': 'attention.query',
'key': 'attention.key',
'value': 'attention.value',
'output.dense': 'output',
'encoder_decoder_attention.o': 'encoder_decoder_attention.attention.o',
'pre_self_attention_layer_norm': 'self_attention.layer_norm',
'pre_cross_attention_layer_norm': 'encoder_decoder_attention.layer_norm',
'mlp.': 'mlp.DenseReluDense.',
'pre_mlp_layer_norm': 'mlp.layer_norm',
'self_attention.o': 'self_attention.attention.o',
'decoder.embeddings.embedding': 'decoder.embed_tokens.weight',
'decoder.relpos_bias.rel_embedding': 'decoder.layer.0.self_attention.attention.relative_attention_bias.weight',
'decoder.decoder_norm.weight': 'decoder.final_layer_norm.weight',
'decoder.logits_dense.weight': 'decoder.lm_head.weight',
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
lowerCamelCase : int = '.'.join(key[1:])
# rename the key
for old, new in CONVERSION_MAPPING.items():
lowerCamelCase : Optional[int] = new_key.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
lowerCamelCase : List[Any] = new_key.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
lowerCamelCase : Optional[int] = re.sub(R'layers_(\d+)' , R'layer.\1' , SCREAMING_SNAKE_CASE__)
lowerCamelCase : Optional[Any] = new_key.replace('encoder' , 'encoder.encoder')
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
lowerCamelCase : str = re.sub(R'layers_(\d+)' , R'layer.\1' , SCREAMING_SNAKE_CASE__)
lowerCamelCase : int = flax_dict[key]
lowerCamelCase : Dict = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
lowerCamelCase : str = torch.from_numpy(converted_dict[key].T)
else:
lowerCamelCase : Union[str, Any] = torch.from_numpy(converted_dict[key])
return converted_torch_dict
def UpperCAmelCase ( UpperCAmelCase__ : Dict , pytorch_dump_folder_path : str , use_large : bool=False , is_vqa : bool=False):
    """Convert a T5X Pix2Struct checkpoint into an HF model + processor and save both.

    ``UpperCAmelCase__`` is the path to the original T5X checkpoint; the converted
    model and processor are written to ``pytorch_dump_folder_path``.

    Fixes the mangled original, whose four parameters all shared one name (a
    SyntaxError) and whose locals were collapsed into a single throwaway binding;
    the names below are reconstructed from the visible call sites.
    """
    flax_params = get_flax_param(UpperCAmelCase__)
    if not use_large:
        encoder_config = PixaStructVisionConfig()
        decoder_config = PixaStructTextConfig()
    else:
        encoder_config = PixaStructVisionConfig(
            hidden_size=15_36 , d_ff=39_68 , num_attention_heads=24 , num_hidden_layers=18)
        decoder_config = PixaStructTextConfig(hidden_size=15_36 , d_ff=39_68 , num_heads=24 , num_layers=18)
    config = PixaStructConfig(
        vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=is_vqa)
    model = PixaStructForConditionalGeneration(config)
    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)
    tokenizer = AutoTokenizer.from_pretrained('ybelkada/test-pix2struct-tokenizer')
    image_processor = PixaStructImageProcessor()
    processor = PixaStructProcessor(image_processor=image_processor , tokenizer=tokenizer)
    if use_large:
        # presumably these configure the image processor for the large variant
        # (4096 patches, VQA mode) — TODO confirm against the upstream script
        image_processor.max_patches = 40_96
        image_processor.is_vqa = True
    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path , exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
    print('Model saved in {}'.format(pytorch_dump_folder_path))
if __name__ == "__main__":
    # Command-line entry point for the Pix2Struct checkpoint conversion.
    # Fixes the mangled original, which bound the parser and the parsed args to
    # the same name `A` while reading the undefined names `parser` / `args`, and
    # called an undefined function with the misspelled attribute
    # `args.tax_checkpoint_path` (the flag registered is `--t5x_checkpoint_path`).
    parser = argparse.ArgumentParser()
    parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--use_large', action='store_true', help='Use large model.')
    parser.add_argument('--is_vqa', action='store_true', help='Treat the checkpoint as a VQA model.')
    args = parser.parse_args()
    # The conversion entry point is defined above as `UpperCAmelCase` (its original
    # name was lost in the mechanical rename); `is_vqa` is parsed but not forwarded.
    UpperCAmelCase(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
| 320 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( __UpperCamelCase , unittest.TestCase ):
    """Tokenizer test-suite for CLIPTokenizer / CLIPTokenizerFast.

    NOTE(review): mechanically renamed — the five class attributes below all bind
    to one name ``__snake_case`` and method locals to ``__lowerCamelCase``, so
    later reads (``a`` inside ``setUp``, ``tokenizer``, ``tokens``,
    ``tokenizer_s`` / ``tokenizer_r``, ``spaces_unicodes`` …) reference names
    that are never defined here.
    """
    __snake_case = CLIPTokenizer
    __snake_case = CLIPTokenizerFast
    __snake_case = True
    __snake_case = {}
    __snake_case = False
    def _snake_case ( self: Union[str, Any] ):
        # setUp: write a tiny BPE vocab + merges file into tmpdirname.
        super().setUp()
        # fmt: off
        __lowerCamelCase : Any = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        __lowerCamelCase : Tuple = dict(zip(a , range(len(a ) ) ) )
        __lowerCamelCase : List[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
        __lowerCamelCase : Tuple = {'unk_token': '<unk>'}
        __lowerCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        __lowerCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(a ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(a ) )
    def _snake_case ( self: Tuple , **a: Union[str, Any] ):
        # Factory for the slow tokenizer loaded from the temp vocab files.
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **a )
    def _snake_case ( self: Union[str, Any] , **a: List[str] ):
        # Factory for the fast tokenizer loaded from the temp vocab files.
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **a )
    def _snake_case ( self: Optional[int] , a: List[Any] ):
        # Round-trip sample: input text and its expected detokenized form.
        __lowerCamelCase : Tuple = 'lower newer'
        __lowerCamelCase : Tuple = 'lower newer'
        return input_text, output_text
    def _snake_case ( self: List[str] ):
        # Basic tokenize + convert_tokens_to_ids check against the toy vocab.
        __lowerCamelCase : List[Any] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        __lowerCamelCase : Optional[Any] = 'lower newer'
        __lowerCamelCase : int = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
        __lowerCamelCase : Optional[int] = tokenizer.tokenize(a )
        self.assertListEqual(a , a )
        __lowerCamelCase : int = tokens + [tokenizer.unk_token]
        __lowerCamelCase : int = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a )
    @require_ftfy
    def _snake_case ( self: Union[str, Any] ):
        # Slow (ftfy) vs fast tokenizer must agree on tricky unicode inputs.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                __lowerCamelCase : List[Any] = self.tokenizer_class.from_pretrained(a , **a )
                __lowerCamelCase : int = self.rust_tokenizer_class.from_pretrained(a , **a )
                __lowerCamelCase : str = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
                __lowerCamelCase : Optional[Any] = tokenizer_s.tokenize(a )
                __lowerCamelCase : Optional[Any] = tokenizer_r.tokenize(a )
                self.assertListEqual(a , a )
                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                __lowerCamelCase : List[Any] = 'xa\u0303y' + ' ' + 'x\xe3y'
                __lowerCamelCase : Tuple = tokenizer_s.tokenize(a )
                __lowerCamelCase : Any = tokenizer_r.tokenize(a )
                self.assertListEqual(a , a )
                # Test that the tokenization is identical on unicode of space type
                __lowerCamelCase : List[Any] = [
                    '\u0009', # (horizontal tab, '\t')
                    '\u000B', # (vertical tab)
                    '\u000C', # (form feed)
                    '\u0020', # (space, ' ')
                    '\u200E', # (left-to-right mark):w
                    '\u200F', # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    __lowerCamelCase : List[Any] = tokenizer_s.tokenize(a )
                    __lowerCamelCase : Optional[int] = tokenizer_r.tokenize(a )
                    self.assertListEqual(a , a )
                # Test that the tokenization is identical on unicode of line break type
                __lowerCamelCase : str = [
                    '\u000A', # (line feed, '\n')
                    '\r\n', # (carriage return and line feed, '\r\n')
                    '\u000D', # (carriage return, '\r')
                    '\r', # (carriage return, '\r')
                    '\u000D', # (carriage return, '\r')
                    '\u2028', # (line separator)
                    '\u2029', # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    __lowerCamelCase : Dict = tokenizer_s.tokenize(a )
                    __lowerCamelCase : List[str] = tokenizer_r.tokenize(a )
                    self.assertListEqual(a , a )
    def _snake_case ( self: List[Any] ):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                __lowerCamelCase : Optional[int] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
                __lowerCamelCase : Optional[int] = F'{text_of_1_token} {text_of_1_token}'
                __lowerCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(
                    a , use_fast=a , )
                __lowerCamelCase : Any = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(a ) + 1, len(a ) + 1 + len(a )) , )
                __lowerCamelCase : List[Any] = F' {text}'
                __lowerCamelCase : str = self.rust_tokenizer_class.from_pretrained(
                    a , use_fast=a , )
                __lowerCamelCase : Any = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(a )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(a ) + 1, 1 + len(a ) + 1 + len(a )) , )
    def _snake_case ( self: str ):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error in raised when the user try to load a previous version of the tokenizer.
        with self.assertRaises(a ) as context:
            self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
        self.assertTrue(
            context.exception.args[0].startswith(
                'The `backend_tokenizer` provided does not match the expected format.' ) )
    @require_ftfy
    def _snake_case ( self: Tuple ):
        # Re-run the common python/rust equality test under ftfy.
        super().test_tokenization_python_rust_equals()
    def _snake_case ( self: Tuple ):
        # CLIP always lower cases letters
        pass
| 669 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
# Make all (cuda) ops deterministic so the image-slice assertions are reproducible.
enable_full_determinism()
class SCREAMING_SNAKE_CASE_ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
    """Fast tests for StableUnCLIPImgaImgPipeline built from tiny random components.

    NOTE(review): identifiers were mechanically renamed — locals are all rebound to
    ``__UpperCamelCase`` — so names read later in each method
    (``embedder_hidden_size``, ``feature_extractor``, ``sd_pipe``, ``inputs`` …)
    are never actually defined; restore the originals before running.
    """
    __magic_name__ : Dict = StableUnCLIPImgaImgPipeline
    __magic_name__ : str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    __magic_name__ : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    __magic_name__ : List[Any] = frozenset(
        [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    __magic_name__ : int = frozenset([] )
    def lowerCamelCase__ ( self : Dict ) -> Optional[Any]:
        """Build and return the dict of tiny pipeline components."""
        __UpperCamelCase : int = 32
        __UpperCamelCase : Any = embedder_hidden_size
        # image encoding components
        __UpperCamelCase : List[str] = CLIPImageProcessor(crop_size=32 , size=32 )
        torch.manual_seed(0 )
        __UpperCamelCase : Dict = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=lowerCAmelCase , projection_dim=lowerCAmelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
        # regular denoising components
        torch.manual_seed(0 )
        __UpperCamelCase : Tuple = StableUnCLIPImageNormalizer(embedding_dim=lowerCAmelCase )
        __UpperCamelCase : Dict = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
        torch.manual_seed(0 )
        __UpperCamelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        torch.manual_seed(0 )
        __UpperCamelCase : Dict = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
        torch.manual_seed(0 )
        __UpperCamelCase : Dict = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCAmelCase , layers_per_block=1 , upcast_attention=lowerCAmelCase , use_linear_projection=lowerCAmelCase , )
        torch.manual_seed(0 )
        __UpperCamelCase : str = DDIMScheduler(
            beta_schedule="""scaled_linear""" , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type="""v_prediction""" , set_alpha_to_one=lowerCAmelCase , steps_offset=1 , )
        torch.manual_seed(0 )
        __UpperCamelCase : int = AutoencoderKL()
        __UpperCamelCase : List[str] = {
            # image encoding components
            'feature_extractor': feature_extractor,
            'image_encoder': image_encoder.eval(),
            # image noising components
            'image_normalizer': image_normalizer.eval(),
            'image_noising_scheduler': image_noising_scheduler,
            # regular denoising components
            'tokenizer': tokenizer,
            'text_encoder': text_encoder.eval(),
            'unet': unet.eval(),
            'scheduler': scheduler,
            'vae': vae.eval(),
        }
        return components
    def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : int=True ) -> int:
        """Build deterministic dummy inputs (optionally converting the tensor to PIL)."""
        if str(lowerCAmelCase ).startswith("""mps""" ):
            __UpperCamelCase : Any = torch.manual_seed(lowerCAmelCase )
        else:
            __UpperCamelCase : List[Any] = torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
        __UpperCamelCase : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
        if pil_image:
            __UpperCamelCase : int = input_image * 0.5 + 0.5
            __UpperCamelCase : Tuple = input_image.clamp(0 , 1 )
            __UpperCamelCase : Dict = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            __UpperCamelCase : List[str] = DiffusionPipeline.numpy_to_pil(lowerCAmelCase )[0]
        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    @skip_mps
    def lowerCamelCase__ ( self : Optional[int] ) -> Union[str, Any]:
        """Run the tiny pipeline on CPU and compare a 3x3 output slice."""
        __UpperCamelCase : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
        __UpperCamelCase : Union[str, Any] = self.get_dummy_components()
        __UpperCamelCase : Any = StableUnCLIPImgaImgPipeline(**lowerCAmelCase )
        __UpperCamelCase : Optional[int] = sd_pipe.to(lowerCAmelCase )
        sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
        __UpperCamelCase : Optional[Any] = self.get_dummy_inputs(lowerCAmelCase )
        inputs.update({"""image_embeds""": None} )
        __UpperCamelCase : Tuple = sd_pipe(**lowerCAmelCase ).images
        __UpperCamelCase : List[str] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        __UpperCamelCase : List[str] = np.array([0.38_72, 0.72_24, 0.56_01, 0.47_41, 0.68_72, 0.58_14, 0.46_36, 0.38_67, 0.50_78] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    def lowerCamelCase__ ( self : List[str] ) -> Any:
        """Attention slicing must not change outputs (looser check on cpu/mps)."""
        __UpperCamelCase : str = torch_device in ['cpu', 'mps']
        self._test_attention_slicing_forward_pass(test_max_difference=lowerCAmelCase )
    def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[Any]:
        """Batched vs single inference must match (looser check on cpu/mps)."""
        __UpperCamelCase : Optional[int] = torch_device in ['cpu', 'mps']
        self._test_inference_batch_single_identical(test_max_difference=lowerCAmelCase )
    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def lowerCamelCase__ ( self : List[str] ) -> Tuple:
        """xformers attention must not change outputs (CUDA only)."""
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCAmelCase )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """Slow GPU integration tests for the stable-unclip img2img checkpoints.

    NOTE(review): mechanically renamed — locals are rebound to ``__UpperCamelCase``
    and every call passes the undefined name ``lowerCAmelCase``.
    """
    def lowerCamelCase__ ( self : Optional[int] ) -> Tuple:
        """Free GPU memory between tests (overrides tearDown, per the super() call)."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
        """Compare the 2-1-l checkpoint's output against a stored reference image."""
        __UpperCamelCase : List[str] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
        __UpperCamelCase : Dict = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy""" )
        __UpperCamelCase : List[str] = StableUnCLIPImgaImgPipeline.from_pretrained(
            """fusing/stable-unclip-2-1-l-img2img""" , torch_dtype=torch.floataa )
        pipe.to(lowerCAmelCase )
        pipe.set_progress_bar_config(disable=lowerCAmelCase )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        __UpperCamelCase : List[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
        __UpperCamelCase : Optional[int] = pipe(lowerCAmelCase , """anime turle""" , generator=lowerCAmelCase , output_type="""np""" )
        __UpperCamelCase : List[Any] = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(lowerCAmelCase , lowerCAmelCase )
    def lowerCamelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
        """Compare the 2-1-h checkpoint's output against a stored reference image."""
        __UpperCamelCase : Union[str, Any] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
        __UpperCamelCase : Union[str, Any] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy""" )
        __UpperCamelCase : Dict = StableUnCLIPImgaImgPipeline.from_pretrained(
            """fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa )
        pipe.to(lowerCAmelCase )
        pipe.set_progress_bar_config(disable=lowerCAmelCase )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        __UpperCamelCase : Optional[int] = torch.Generator(device="""cpu""" ).manual_seed(0 )
        __UpperCamelCase : Optional[Any] = pipe(lowerCAmelCase , """anime turle""" , generator=lowerCAmelCase , output_type="""np""" )
        __UpperCamelCase : Dict = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(lowerCAmelCase , lowerCAmelCase )
    def lowerCamelCase__ ( self : Optional[int] ) -> Tuple:
        """With slicing + cpu offload, peak GPU memory must stay under 7 GB."""
        __UpperCamelCase : str = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        __UpperCamelCase : int = StableUnCLIPImgaImgPipeline.from_pretrained(
            """fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa )
        __UpperCamelCase : Union[str, Any] = pipe.to(lowerCAmelCase )
        pipe.set_progress_bar_config(disable=lowerCAmelCase )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        __UpperCamelCase : Dict = pipe(
            lowerCAmelCase , """anime turtle""" , num_inference_steps=2 , output_type="""np""" , )
        __UpperCamelCase : Optional[Any] = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 279 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
# Detect whether we are running inside Google Colab; stays False when the probe
# itself is unavailable.
# NOTE(review): the flag name was mangled to `lowercase_`; the class below reads
# `in_colab`, which this module never defines — the two names must be reconciled.
lowercase_ = False
try:
    lowercase_ = _is_package_available('google.colab')
except ModuleNotFoundError:
    pass
@input.register
class A_ :
    """Terminal bullet menu: renders ``choices`` and lets the user pick one with
    the arrow keys, a number key, or (in Colab) a typed index.

    NOTE(review): mechanically renamed — ``__init__`` binds everything to
    ``__lowerCamelCase`` while later code reads ``self.position``, ``self.choices``,
    ``self.prompt`` and ``self.arrow_char``; several methods also reference the
    undefined names ``a`` / ``choices`` / ``prompt`` / ``in_colab`` /
    ``self.current_selection``.  Restore the originals before use.
    """
    def __init__( self: int , a: str = None , a: list = [] ):
        # NOTE(review): duplicate parameter names `a` (SyntaxError) and a mutable
        # default list — both rename artifacts.
        __lowerCamelCase : Dict = 0
        __lowerCamelCase : Dict = choices
        __lowerCamelCase : Tuple = prompt
        if sys.platform == "win32":
            __lowerCamelCase : Union[str, Any] = '*'
        else:
            __lowerCamelCase : Any = '➔ '
    def _snake_case ( self: Any , a: Tuple , a: str = "" ):
        # Write one choice, green-highlighted on non-Windows terminals.
        if sys.platform != "win32":
            writeColor(self.choices[index] , 32 , a )
        else:
            forceWrite(self.choices[index] , a )
    def _snake_case ( self: Tuple , a: int ):
        # Print a choice row, prefixing the arrow when it is the current position.
        if index == self.position:
            forceWrite(F' {self.arrow_char} ' )
            self.write_choice(a )
        else:
            forceWrite(F'  {self.choices[index]}' )
        reset_cursor()
    def _snake_case ( self: Optional[int] , a: Direction , a: int = 1 ):
        # Move the highlighted position up/down by `num_spaces`, redrawing both rows.
        __lowerCamelCase : str = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices ):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(a )
        move_cursor(a , direction.name )
        self.print_choice(self.position )
    @input.mark(KEYMAP['up'] )
    def _snake_case ( self: Tuple ):
        self.move_direction(Direction.UP )
    @input.mark(KEYMAP['down'] )
    def _snake_case ( self: Optional[int] ):
        self.move_direction(Direction.DOWN )
    @input.mark(KEYMAP['newline'] )
    def _snake_case ( self: str ):
        # Enter: jump below the menu and return the selected index.
        move_cursor(len(self.choices ) - self.position , 'DOWN' )
        return self.position
    @input.mark(KEYMAP['interrupt'] )
    def _snake_case ( self: Union[str, Any] ):
        # Ctrl-C: leave the menu area, then propagate the interrupt.
        move_cursor(len(self.choices ) - self.position , 'DOWN' )
        raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(a )] for number in range(10 )] )
    def _snake_case ( self: str ):
        # Jump directly to the choice whose digit key was pressed.
        # NOTE(review): the decorator's comprehension uses undefined `a` instead of
        # `number`, and `self.current_selection` is never set in this class.
        __lowerCamelCase : List[Any] = int(chr(self.current_selection ) )
        __lowerCamelCase : Any = index - self.position
        if index == self.position:
            return
        if index < len(self.choices ):
            if self.position > index:
                self.move_direction(Direction.UP , -movement )
            elif self.position < index:
                self.move_direction(Direction.DOWN , a )
            else:
                return
        else:
            return
    def _snake_case ( self: str , a: int = 0 ):
        # Run the menu loop: print prompt + choices, read input until a choice is
        # made, then erase the menu and return the chosen index.
        if self.prompt:
            linebreak()
            forceWrite(self.prompt , '\n' )
            if in_colab:
                forceWrite('Please input a choice index (starting from 0), and press enter' , '\n' )
            else:
                forceWrite('Please select a choice using the arrow or number keys, and selecting with enter' , '\n' )
        __lowerCamelCase : Dict = default_choice
        for i in range(len(self.choices ) ):
            self.print_choice(a )
            forceWrite('\n' )
        move_cursor(len(self.choices ) - self.position , 'UP' )
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        __lowerCamelCase : Any = int(builtins.input() )
                    except ValueError:
                        __lowerCamelCase : str = default_choice
                else:
                    __lowerCamelCase : Optional[int] = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices ) + 1 ):
                        move_cursor(1 , 'UP' )
                        clear_line()
                    self.write_choice(a , '\n' )
                    return choice
| 669 | 0 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def A__ ( func ):
    """Decorator that times a single call of `func`.

    The wrapped function runs `func` once, discards its result, and returns
    the elapsed wall-clock time in seconds (float) — this is a benchmarking
    helper, not a transparent wrapper.
    """

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)  # result intentionally discarded; only the duration matters
        delta = timeit.default_timer() - starttime
        return delta

    # Preserve the benchmarked function's name for reporting.
    wrapper.__name__ = func.__name__
    return wrapper
def generate_examples(features, num_examples=100, seq_shapes=None):
    """Generate `num_examples` random examples matching a `datasets` feature spec.

    Args:
        features: mapping of column name -> datasets feature type.
        num_examples: number of examples to generate.
        seq_shapes: mapping of column name -> shape used for Sequence features.

    Returns:
        List of `(index, example_dict)` tuples.
    """
    # Name restored to `generate_examples`: the dataset-writer helper below
    # calls it by this name.
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                example[k] = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    example[k] = 'The small grey turtle was surprisingly fast when challenged.'
                else:
                    example[k] = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                # Unwrap nested Sequence features down to the inner element type.
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                example[k] = np.random.rand(*shape).astype(v.dtype)
        dummy_data.append((i, example))

    return dummy_data
def A__ ( dataset_path, features, num_examples=100, seq_shapes=None ):
    """Write `num_examples` random examples to `dataset_path` and load them back.

    Args:
        dataset_path: target Arrow file path.
        features: `datasets.Features` describing the columns.
        num_examples: number of examples to write.
        seq_shapes: forwarded to `generate_examples` for Sequence columns.

    Returns:
        The `datasets.Dataset` read back from the written file.

    Raises:
        ValueError: if the writer reports a different number of examples.
    """
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f'''Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.''' )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( SchedulerCommonTest ):
    """Unit tests for the consistency-models `CMStochasticIterativeScheduler`."""

    # These attribute names are read by SchedulerCommonTest helpers such as
    # `check_over_configs`, so they must be `scheduler_classes` /
    # `num_inference_steps` exactly.
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Return a default scheduler config, updated with any overrides."""
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }
        config.update(**kwargs)
        return config

    def test_step_shape(self):
        """`step` must preserve the sample shape across consecutive timesteps."""
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)

        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise(self):
        """Run a full one-step sampling loop and pin the expected statistics."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3

    def test_full_loop_with_timesteps(self):
        """Same sampling loop but with explicit custom timesteps [106, 0]."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]  # 15 breaks the descending order

        with self.assertRaises(ValueError, msg='`timesteps` must be in descending order.'):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg='Can only pass one of `num_inference_steps` or `timesteps`.'):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]  # one past the last valid timestep

        with self.assertRaises(
            ValueError,
            # Fixed: this message was missing its f-string prefix and had a doubled brace.
            msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}',
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 669 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
# Module-level logger for this tokenizer implementation.
__lowerCamelCase = logging.get_logger(__name__)
# Canonical on-disk file names for a saved byte-level BPE tokenizer.
__lowerCamelCase = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# Hub download URLs for the vocab/merges/tokenizer files of each published RoBERTa checkpoint.
__lowerCamelCase = {
    "vocab_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
        ),
    },
    "tokenizer_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
        "roberta-base-openai-detector": (
            "https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
        ),
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
        ),
    },
}
# Maximum input lengths (positional embedding sizes) per checkpoint.
__lowerCamelCase = {
    "roberta-base": 5_12,
    "roberta-large": 5_12,
    "roberta-large-mnli": 5_12,
    "distilroberta-base": 5_12,
    "roberta-base-openai-detector": 5_12,
    "roberta-large-openai-detector": 5_12,
}
class _snake_case ( PreTrainedTokenizerFast ):
    """Fast (tokenizers-backed) byte-level BPE RoBERTa tokenizer."""

    # These attribute names are read by the PreTrainedTokenizerFast machinery
    # and must keep their conventional names.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        # Rebuild the pre-tokenizer when the requested `add_prefix_space`
        # differs from what the serialized backend tokenizer was saved with.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('''add_prefix_space''', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('''type'''))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state['''sep'''])
            if "cls" in state:
                state["cls"] = tuple(state['''cls'''])

            changes_to_apply = False
            if state.get('''add_prefix_space''', add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get('''trim_offsets''', trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop('''type'''))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self):
        """Return the mask token string, or None (with an error log) when unset."""
        # Restored name `mask_token`: the setter below is declared with
        # `@mask_token.setter` and requires this property name.
        if self._mask_token is None:
            if self.verbose:
                logger.error('''Using mask_token, but it is not set yet.''')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token absorbs the preceding space (lstrip=True) so that
        # `<mask>` behaves like an ordinary word inside a sentence.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get('''is_split_into_words''', False)
        assert self.add_prefix_space or not is_split_into_words, (
            f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get('''is_split_into_words''', False)
        assert self.add_prefix_space or not is_split_into_words, (
            f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Save the tokenizer model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_pair=None):
        """`<s> A </s>` for one sequence, `<s> A </s></s> B </s>` for a pair."""
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a_pair is None:
            return output
        return output + [self.eos_token_id] + token_ids_a_pair + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None):
        """RoBERTa does not use token type ids; return all-zero ids of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_pair + sep) * [0]
| 608 |
from datetime import datetime

import requests
from bs4 import BeautifulSoup  # fixed: the package is `bs4`, not `bsa`

if __name__ == "__main__":
    # Download the Open Graph preview image of a web page.
    url = input('Enter image url: ').strip()
    print(f"""Downloading image from {url} ...""")
    soup = BeautifulSoup(requests.get(url).content, 'html.parser')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('meta', {'property': 'og:image'})['content']
    image_data = requests.get(image_url).content
    # Timestamped file name so repeated downloads do not overwrite each other.
    file_name = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
    with open(file_name, 'wb') as fp:
        fp.write(image_data)
    print(f"""Done. Image saved to disk as {file_name}.""")
| 669 | 0 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float,
) -> float:
    '''
    Relax the outgoing edges of `v` for one direction of a bidirectional
    Dijkstra search, updating `cst_fwd`, `parent` and `queue` in place.
    Returns the (possibly improved) best known source-to-destination distance.
    '''
    # Name restored to `pass_and_relaxation`: the search loop below calls it
    # by this name.
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        # A node already settled by the opposite search gives a candidate path.
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    '''
    Bidirectional Dijkstra: run one Dijkstra from `source` over
    `graph_forward` and one from `destination` over the reversed graph
    `graph_backward`, alternating expansions until the frontiers meet.

    Returns the shortest distance, or -1 when the destination is unreachable.
    '''
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward = PriorityQueue()
    queue_backward = PriorityQueue()
    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        # Standard termination: once the two frontiers' settled costs exceed the
        # best candidate, no shorter path can be found.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
# Forward adjacency list: node -> [[neighbour, edge_weight], ...].
SCREAMING_SNAKE_CASE__ = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
# Reversed-edge adjacency list used by the backward half of the search.
SCREAMING_SNAKE_CASE__ = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 267 |
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

# Script that validates repository file names: no uppercase letters, spaces,
# hyphens, or files outside a directory.  Exits non-zero when any are found.
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(F"""{len(upper_files)} files contain uppercase characters:""")
    print('\n'.join(upper_files) + '\n')

space_files = [file for file in filepaths if ' ' in file]
if space_files:
    print(F"""{len(space_files)} files contain space characters:""")
    print('\n'.join(space_files) + '\n')

hyphen_files = [file for file in filepaths if '-' in file]
if hyphen_files:
    print(F"""{len(hyphen_files)} files contain hyphen characters:""")
    print('\n'.join(hyphen_files) + '\n')

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(F"""{len(nodir_files)} files are not in a directory:""")
    print('\n'.join(nodir_files) + '\n')

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    # Exit code equals the number of offending files so CI can fail on it.
    sys.exit(bad_files)
| 669 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCamelCase__ ( ProcessorMixin):
    """Combined processor wrapping a CLIP image processor and an XLM-Roberta tokenizer."""

    # Attribute names are read by ProcessorMixin and must keep these exact names.
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # Back-compat: fall back to the deprecated `feature_extractor` kwarg.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`; at least one is required.

        Returns a BatchEncoding holding the tokenizer output, the image
        features, or both (with `pixel_values` merged into the text encoding).
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Union of tokenizer and image-processor input names, order-preserving."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger for this configuration module.
lowercase_ = logging.get_logger(__name__)
# Hub config URLs for each published XLM-RoBERTa checkpoint.
lowercase_ = {
    'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
    'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
    'xlm-roberta-large-finetuned-conll02-dutch': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
    ),
    'xlm-roberta-large-finetuned-conll02-spanish': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
    ),
    'xlm-roberta-large-finetuned-conll03-english': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
    ),
    'xlm-roberta-large-finetuned-conll03-german': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
    ),
}
class A_ ( PretrainedConfig ):
    """Configuration for XLM-RoBERTa models (mirrors the RoBERTa/BERT layout)."""

    # Read by the AutoConfig machinery; must keep the name `model_type`.
    model_type = """xlm-roberta"""

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.0_2,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # All hyperparameters are stored on `self` so PretrainedConfig can
        # serialize them to config.json.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class A_ ( OnnxConfig ):
    """ONNX export configuration for XLM-RoBERTa."""

    @property
    def inputs(self):
        """Dynamic-axis mapping for the exported model's inputs, per task."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
| 669 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.