code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# Make torch ops deterministic so the pixel-slice assertions below are stable.
enable_full_determinism()
# NOTE(review): this block is machine-obfuscated. The base class `a__` is not
# defined in the visible file (presumably a pipeline tester mixin — confirm
# against the original diffusers test), every property/method below is named
# `lowercase__` (later defs shadow earlier ones), locals are bound to
# `lowerCAmelCase` while later lines read the intended names (model, unet,
# components, inputs, ...), and `__lowerCAmelCase` is undefined. As written the
# class cannot run; the comments describe the apparent intent only.
class SCREAMING_SNAKE_CASE__ ( a__ , unittest.TestCase ):
    """Fast (dummy-weights, CPU) tests for KandinskyVaaControlnetImgaImgPipeline."""

    # Pipeline class under test.
    a : str =KandinskyVaaControlnetImgaImgPipeline
    # Required / expected call-argument names for the pipeline.
    a : List[Any] =["image_embeds", "negative_image_embeds", "image", "hint"]
    a : Dict =["image_embeds", "negative_image_embeds", "image", "hint"]
    # Optional call arguments. "guidance_scale" and "return_dict" are listed
    # twice — redundant but harmless for a membership list.
    a : Optional[Any] =[
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    a : Tuple =False

    @property
    def lowercase__ ( self ):
        """Hidden size of the dummy text embedder (small to keep tests fast)."""
        return 32

    @property
    def lowercase__ ( self ):
        """Time-embedding input dimension of the dummy UNet."""
        return 32

    @property
    def lowercase__ ( self ):
        # Mirrors the time input dim; `self.time_input_dim` does not exist under
        # that name in this mangled chunk — presumably a shadowed property above.
        return self.time_input_dim

    @property
    def lowercase__ ( self ):
        # Time-projection dimension is 4x the time input dim.
        return self.time_input_dim * 4

    @property
    def lowercase__ ( self ):
        """Number of train timesteps for the dummy scheduler."""
        return 100

    @property
    def lowercase__ ( self ):
        """Build a tiny UNet conditioned on image embeds + hint, deterministic init."""
        torch.manual_seed(0 )
        lowerCAmelCase : str = {
            '''in_channels''': 8,
            # Out channels is double in channels because predicts mean and variance
            '''out_channels''': 8,
            '''addition_embed_type''': '''image_hint''',
            '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
            '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
            '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
            '''layers_per_block''': 1,
            '''encoder_hid_dim''': self.text_embedder_hidden_size,
            '''encoder_hid_dim_type''': '''image_proj''',
            '''cross_attention_dim''': self.cross_attention_dim,
            '''attention_head_dim''': 4,
            '''resnet_time_scale_shift''': '''scale_shift''',
            '''class_embed_type''': None,
        }
        lowerCAmelCase : Optional[int] = UNetaDConditionModel(**__lowerCAmelCase )
        return model

    @property
    def lowercase__ ( self ):
        """Constructor kwargs for a tiny VQModel (the `movq` image de/encoder)."""
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def lowercase__ ( self ):
        """Instantiate the dummy VQModel with a fixed seed."""
        torch.manual_seed(0 )
        lowerCAmelCase : str = VQModel(**self.dummy_movq_kwargs )
        return model

    def lowercase__ ( self ):
        """Assemble the pipeline's component dict: unet, DDIM scheduler, movq."""
        lowerCAmelCase : List[Any] = self.dummy_unet
        lowerCAmelCase : Optional[int] = self.dummy_movq
        lowerCAmelCase : List[str] = {
            '''num_train_timesteps''': 1_000,
            '''beta_schedule''': '''linear''',
            '''beta_start''': 0.00085,
            '''beta_end''': 0.012,
            '''clip_sample''': False,
            '''set_alpha_to_one''': False,
            '''steps_offset''': 0,
            '''prediction_type''': '''epsilon''',
            '''thresholding''': False,
        }
        lowerCAmelCase : Union[str, Any] = DDIMScheduler(**__lowerCAmelCase )
        lowerCAmelCase : List[Any] = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''movq''': movq,
        }
        return components

    def lowercase__ ( self , snake_case__ , snake_case__=0 ):
        """Build deterministic dummy call inputs (embeds, init image, hint, generator)."""
        lowerCAmelCase : List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
        lowerCAmelCase : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            __lowerCAmelCase )
        # create init_image
        lowerCAmelCase : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
        lowerCAmelCase : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        # NOTE(review): `np.uinta` does not exist — apparently a mangled
        # `np.uint8` (Image.fromarray needs uint8 data); confirm upstream.
        lowerCAmelCase : int = Image.fromarray(np.uinta(__lowerCAmelCase ) ).convert("RGB" ).resize((256, 256) )
        # create hint
        lowerCAmelCase : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
        # mps does not support device-bound generators, hence the branch.
        if str(__lowerCAmelCase ).startswith("mps" ):
            lowerCAmelCase : Dict = torch.manual_seed(__lowerCAmelCase )
        else:
            lowerCAmelCase : Dict = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
        lowerCAmelCase : Dict = {
            '''image''': init_image,
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''hint''': hint,
            '''generator''': generator,
            '''height''': 64,
            '''width''': 64,
            '''num_inference_steps''': 10,
            '''guidance_scale''': 7.0,
            '''strength''': 0.2,
            '''output_type''': '''np''',
        }
        return inputs

    def lowercase__ ( self ):
        """Full CPU forward pass; checks output shape and a fixed 3x3 pixel slice."""
        lowerCAmelCase : Optional[int] = '''cpu'''
        lowerCAmelCase : Dict = self.get_dummy_components()
        lowerCAmelCase : List[str] = self.pipeline_class(**__lowerCAmelCase )
        lowerCAmelCase : Any = pipe.to(__lowerCAmelCase )
        pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        lowerCAmelCase : int = pipe(**self.get_dummy_inputs(__lowerCAmelCase ) )
        lowerCAmelCase : List[Any] = output.images
        # Second call with return_dict=False must give the same image as a tuple.
        lowerCAmelCase : str = pipe(
            **self.get_dummy_inputs(__lowerCAmelCase ) , return_dict=__lowerCAmelCase , )[0]
        lowerCAmelCase : List[Any] = image[0, -3:, -3:, -1]
        lowerCAmelCase : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        lowerCAmelCase : int = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
# NOTE(review): machine-obfuscated — locals are bound to `lowerCAmelCase` while
# later lines read the intended names (init_image, hint, pipe_prior, pipeline,
# output, ...), `__lowerCAmelCase` is undefined, and `torch.floataa` is
# apparently a mangled `torch.float16` — confirm against the original test.
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow integration test against the real Kandinsky 2.2 controlnet-depth
    checkpoints; compares against a reference image hosted on the Hub."""

    def lowercase__ ( self ):
        """Free Python and CUDA memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowercase__ ( self ):
        """Prior -> controlnet img2img pipeline run; compare to the stored output."""
        lowerCAmelCase : List[Any] = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy" )
        lowerCAmelCase : int = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        lowerCAmelCase : List[Any] = init_image.resize((512, 512) )
        lowerCAmelCase : Union[str, Any] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png" )
        # Hint image -> float tensor in [0, 1], shape (1, C, H, W).
        lowerCAmelCase : str = torch.from_numpy(np.array(__lowerCAmelCase ) ).float() / 255.0
        lowerCAmelCase : List[str] = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
        lowerCAmelCase : Union[str, Any] = '''A robot, 4k photo'''
        lowerCAmelCase : int = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
        pipe_prior.to(__lowerCAmelCase )
        lowerCAmelCase : Dict = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa )
        lowerCAmelCase : Optional[Any] = pipeline.to(__lowerCAmelCase )
        pipeline.set_progress_bar_config(disable=__lowerCAmelCase )
        lowerCAmelCase : Any = torch.Generator(device="cpu" ).manual_seed(0 )
        lowerCAmelCase : Optional[Any] = pipe_prior(
            __lowerCAmelCase , image=__lowerCAmelCase , strength=0.85 , generator=__lowerCAmelCase , negative_prompt="" , ).to_tuple()
        lowerCAmelCase : Tuple = pipeline(
            image=__lowerCAmelCase , image_embeds=__lowerCAmelCase , negative_image_embeds=__lowerCAmelCase , hint=__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type="np" , )
        lowerCAmelCase : Optional[int] = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )
| 645
|
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
# NOTE(review): machine-obfuscated — the base class `a__` is undefined
# (presumably TestCase/a mixin), locals/attributes are bound to `lowercase__`
# while later code reads the intended names (self.tmpdirname,
# self.num_block_records, self.vocab_file, retriever, config, ...), and
# `__lowerCAmelCase` is undefined. Not runnable as written.
class UpperCAmelCase ( a__ ):
    """Unit tests for RealmRetriever: block retrieval, answer-span extraction
    and save_pretrained/from_pretrained round-tripping."""

    def _lowerCAmelCase( self ) -> Union[str, Any]:
        """Create a temp dir holding a tiny WordPiece vocab and a block-records dir."""
        lowercase__ : Optional[Any] = tempfile.mkdtemp()
        lowercase__ : Dict = 5
        # Realm tok
        lowercase__ : Dict = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''test''',
            '''question''',
            '''this''',
            '''is''',
            '''the''',
            '''first''',
            '''second''',
            '''third''',
            '''fourth''',
            '''fifth''',
            '''record''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        lowercase__ : str = os.path.join(self.tmpdirname , '''realm_tokenizer''' )
        os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
        lowercase__ : Union[str, Any] = os.path.join(__lowerCAmelCase , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        lowercase__ : List[Any] = os.path.join(self.tmpdirname , '''realm_block_records''' )
        os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )

    def _lowerCAmelCase( self ) -> RealmTokenizer:
        """Load the tokenizer written during set-up."""
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) )

    def _lowerCAmelCase( self ) -> List[Any]:
        """Remove the temp dir."""
        shutil.rmtree(self.tmpdirname )

    def _lowerCAmelCase( self ) -> Optional[int]:
        """Config sized to the dummy block records."""
        lowercase__ : Dict = RealmConfig(num_block_records=self.num_block_records )
        return config

    def _lowerCAmelCase( self ) -> int:
        """Two-row question/answer Dataset fixture."""
        lowercase__ : str = Dataset.from_dict(
            {
                '''id''': ['''0''', '''1'''],
                '''question''': ['''foo''', '''bar'''],
                '''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
            } )
        return dataset

    def _lowerCAmelCase( self ) -> int:
        """Six byte-string evidence blocks (indices 0-5)."""
        lowercase__ : Optional[int] = np.array(
            [
                b'''This is the first record''',
                b'''This is the second record''',
                b'''This is the third record''',
                b'''This is the fourth record''',
                b'''This is the fifth record''',
                b'''This is a longer longer longer record''',
            ] , dtype=__lowerCAmelCase , )
        return block_records

    def _lowerCAmelCase( self ) -> str:
        """Retriever over the dummy block records + dummy tokenizer."""
        lowercase__ : List[str] = RealmRetriever(
            block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
        return retriever

    def _lowerCAmelCase( self ) -> Optional[Any]:
        """Retrieve blocks 0 and 3 for one question; check shapes and detokenized text."""
        lowercase__ : Union[str, Any] = self.get_config()
        lowercase__ : List[str] = self.get_dummy_retriever()
        lowercase__ : List[str] = retriever.tokenizer
        lowercase__ : str = np.array([0, 3] , dtype='''long''' )
        lowercase__ : Union[str, Any] = tokenizer(['''Test question'''] ).input_ids
        lowercase__ : Tuple = tokenizer(
            ['''the fourth'''] , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , ).input_ids
        lowercase__ : int = config.reader_seq_len
        lowercase__ , lowercase__ , lowercase__ , lowercase__ : int = retriever(
            __lowerCAmelCase , __lowerCAmelCase , answer_ids=__lowerCAmelCase , max_length=__lowerCAmelCase , return_tensors='''np''' )
        self.assertEqual(len(__lowerCAmelCase ) , 2 )
        self.assertEqual(len(__lowerCAmelCase ) , 2 )
        self.assertEqual(len(__lowerCAmelCase ) , 2 )
        self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
        self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
        self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
        self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )

    def _lowerCAmelCase( self ) -> List[str]:
        """Answer-span extraction across three retrieved blocks (one without the answer)."""
        lowercase__ : Optional[Any] = self.get_config()
        lowercase__ : Dict = self.get_dummy_retriever()
        lowercase__ : List[Any] = retriever.tokenizer
        lowercase__ : Optional[int] = np.array([0, 3, 5] , dtype='''long''' )
        lowercase__ : Any = tokenizer(['''Test question'''] ).input_ids
        lowercase__ : str = tokenizer(
            ['''the fourth''', '''longer longer'''] , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , ).input_ids
        lowercase__ : Tuple = config.reader_seq_len
        lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[Any] = retriever(
            __lowerCAmelCase , __lowerCAmelCase , answer_ids=__lowerCAmelCase , max_length=__lowerCAmelCase , return_tensors='''np''' )
        self.assertEqual([False, True, True] , __lowerCAmelCase )
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __lowerCAmelCase )
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __lowerCAmelCase )

    def _lowerCAmelCase( self ) -> str:
        """save_pretrained / from_pretrained round-trip, local and mocked-remote."""
        lowercase__ : Tuple = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
        # Test local path
        lowercase__ : Tuple = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
        self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
        # Test mocked remote path
        with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download:
            lowercase__ : Any = os.path.join(
                os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME )
            lowercase__ : Dict = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' )
            self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
| 152
| 0
|
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
__SCREAMING_SNAKE_CASE :Dict = logging.get_logger(__name__)
def UpperCAmelCase_ ( input_image: np.ndarray , output_size: Union[int, Iterable[int]] , keep_aspect_ratio: bool , multiple: int ) -> Tuple[int, int]:
    """Compute the (height, width) an image should be resized to.

    Both output dimensions are constrained to be multiples of ``multiple``;
    when ``keep_aspect_ratio`` is True, the image is scaled as little as
    possible so that the original aspect ratio is preserved.

    Fixes two obfuscation defects in the original: all four parameters were
    named ``__lowercase`` (a SyntaxError) and every local was assigned to
    ``_UpperCAmelCase`` while read under its real name (NameError).

    Args:
        input_image: image whose current size is queried via ``get_image_size``.
        output_size: target size, either a single int (square) or (h, w).
        keep_aspect_ratio: scale both dims by the same factor when True.
        multiple: both returned dims are rounded to a multiple of this.

    Returns:
        ``(new_height, new_width)`` tuple of ints.
    """

    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        # Round to the nearest multiple, then nudge down/up (staying on a
        # multiple boundary) so the result lies in [min_val, max_val].
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
# NOTE(review): machine-obfuscated DPT-style image processor. The base class
# `lowerCAmelCase_` is undefined (presumably BaseImageProcessor), every method
# repeats the parameter name `snake_case_` (a SyntaxError), and `__init__`
# binds its config values to the local `_UpperCAmelCase` instead of the
# `self.*` attributes that `preprocess` reads (self.do_resize, self.size, ...).
# Left byte-identical; comments describe apparent intent only.
class A_ ( lowerCAmelCase_ ):
    """Image processor: optional aspect-preserving multiple-of-N resize,
    rescale and normalize; plus semantic-segmentation post-processing."""

    # Model-input key produced by preprocess.
    _lowerCamelCase : Optional[int] = ["""pixel_values"""]

    def __init__( self : List[str] , snake_case_ : bool = True , snake_case_ : Dict[str, int] = None , snake_case_ : PILImageResampling = PILImageResampling.BILINEAR , snake_case_ : bool = False , snake_case_ : int = 1 , snake_case_ : bool = True , snake_case_ : Union[int, float] = 1 / 2_5_5 , snake_case_ : bool = True , snake_case_ : Optional[Union[float, List[float]]] = None , snake_case_ : Optional[Union[float, List[float]]] = None , **snake_case_ : List[Any] , ):
        """Store per-step defaults (resize/rescale/normalize settings)."""
        super().__init__(**snake_case_ )
        _UpperCAmelCase = size if size is not None else {"height": 3_8_4, "width": 3_8_4}
        _UpperCAmelCase = get_size_dict(snake_case_ )
        _UpperCAmelCase = do_resize
        _UpperCAmelCase = size
        _UpperCAmelCase = keep_aspect_ratio
        _UpperCAmelCase = ensure_multiple_of
        _UpperCAmelCase = resample
        _UpperCAmelCase = do_rescale
        _UpperCAmelCase = rescale_factor
        _UpperCAmelCase = do_normalize
        _UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        _UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def lowercase ( self : List[Any] , snake_case_ : np.ndarray , snake_case_ : Dict[str, int] , snake_case_ : bool = False , snake_case_ : int = 1 , snake_case_ : PILImageResampling = PILImageResampling.BICUBIC , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : Any , ):
        """Resize one image to the (possibly aspect-preserving) target size."""
        _UpperCAmelCase = get_size_dict(snake_case_ )
        if "height" not in size or "width" not in size:
            raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
        _UpperCAmelCase = get_resize_output_image_size(
            snake_case_ , output_size=(size["height"], size["width"]) , keep_aspect_ratio=snake_case_ , multiple=snake_case_ , )
        return resize(snake_case_ , size=snake_case_ , resample=snake_case_ , data_format=snake_case_ , **snake_case_ )

    def lowercase ( self : str , snake_case_ : np.ndarray , snake_case_ : Union[int, float] , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : Tuple , ):
        """Multiply pixel values by a scale factor (e.g. 1/255)."""
        return rescale(snake_case_ , scale=snake_case_ , data_format=snake_case_ , **snake_case_ )

    def lowercase ( self : str , snake_case_ : np.ndarray , snake_case_ : Union[float, List[float]] , snake_case_ : Union[float, List[float]] , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : Union[str, Any] , ):
        """Channel-wise (x - mean) / std normalization."""
        return normalize(snake_case_ , mean=snake_case_ , std=snake_case_ , data_format=snake_case_ , **snake_case_ )

    def lowercase ( self : str , snake_case_ : ImageInput , snake_case_ : bool = None , snake_case_ : int = None , snake_case_ : bool = None , snake_case_ : int = None , snake_case_ : PILImageResampling = None , snake_case_ : bool = None , snake_case_ : float = None , snake_case_ : bool = None , snake_case_ : Optional[Union[float, List[float]]] = None , snake_case_ : Optional[Union[float, List[float]]] = None , snake_case_ : Optional[Union[str, TensorType]] = None , snake_case_ : ChannelDimension = ChannelDimension.FIRST , **snake_case_ : Union[str, Any] , ):
        """Full preprocessing pipeline; per-call args override stored defaults."""
        _UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
        _UpperCAmelCase = size if size is not None else self.size
        _UpperCAmelCase = get_size_dict(snake_case_ )
        _UpperCAmelCase = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        _UpperCAmelCase = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        _UpperCAmelCase = resample if resample is not None else self.resample
        _UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
        _UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
        _UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
        _UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
        _UpperCAmelCase = image_std if image_std is not None else self.image_std
        _UpperCAmelCase = make_list_of_images(snake_case_ )
        if not valid_images(snake_case_ ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        # NOTE(review): `do_resize and size is None or resample is None` binds as
        # `(do_resize and size is None) or (resample is None)` — presumably
        # intended as `do_resize and (size is None or resample is None)`.
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        _UpperCAmelCase = [to_numpy_array(snake_case_ ) for image in images]
        if do_resize:
            _UpperCAmelCase = [self.resize(image=snake_case_ , size=snake_case_ , resample=snake_case_ ) for image in images]
        if do_rescale:
            _UpperCAmelCase = [self.rescale(image=snake_case_ , scale=snake_case_ ) for image in images]
        if do_normalize:
            _UpperCAmelCase = [self.normalize(image=snake_case_ , mean=snake_case_ , std=snake_case_ ) for image in images]
        _UpperCAmelCase = [to_channel_dimension_format(snake_case_ , snake_case_ ) for image in images]
        _UpperCAmelCase = {"pixel_values": images}
        return BatchFeature(data=snake_case_ , tensor_type=snake_case_ )

    def lowercase ( self : str , snake_case_ : Tuple , snake_case_ : List[Tuple] = None ):
        """Turn model logits into per-image segmentation maps, optionally
        resizing each map to the corresponding target size."""
        _UpperCAmelCase = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(snake_case_ ) != len(snake_case_ ):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits" )
            if is_torch_tensor(snake_case_ ):
                _UpperCAmelCase = target_sizes.numpy()
            _UpperCAmelCase = []
            for idx in range(len(snake_case_ ) ):
                _UpperCAmelCase = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=snake_case_ )
                _UpperCAmelCase = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(snake_case_ )
        else:
            _UpperCAmelCase = logits.argmax(dim=1 )
            _UpperCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
| 119
|
'''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
# Route DEBUG+ logs to stdout so subprocess output is captured by the runner.
# NOTE(review): obfuscated — the two assignments should bind `logger` and
# `stream_handler`, which the last line reads; as written those are undefined.
logging.basicConfig(level=logging.DEBUG)
__SCREAMING_SNAKE_CASE :Optional[Any] = logging.getLogger()
__SCREAMING_SNAKE_CASE :Dict = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
# NOTE(review): machine-obfuscated — the base `lowerCAmelCase_` is undefined
# (presumably TestCasePlus), and locals are bound to `_UpperCAmelCase` while
# later lines read the intended names (testargs, gpus, result, data_dir, ...).
class A_ ( lowerCAmelCase_ ):
    """End-to-end smoke tests for finetune_rag (single/multi GPU, ray retriever)."""

    def lowercase ( self : List[str] , snake_case_ : Union[str, Any] ):
        """Write tiny train/val/test source+target files into the data dir."""
        os.makedirs(snake_case_ , exist_ok=snake_case_ )
        _UpperCAmelCase = {"source": "What is love ?", "target": "life"}
        _UpperCAmelCase = {"train": 1_2, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                # One line per example, the same dummy sentence repeated.
                _UpperCAmelCase = "\n".join([contents[field]] * n_lines[split] )
                with open(os.path.join(snake_case_ , f'{split}.{field}' ) , "w" ) as f:
                    f.write(snake_case_ )

    def lowercase ( self : Any , snake_case_ : int , snake_case_ : str = "pytorch" ):
        """Run finetune_rag in a subprocess and return the parsed metrics.json."""
        _UpperCAmelCase = self.get_auto_remove_tmp_dir()
        _UpperCAmelCase = os.path.join(snake_case_ , "output" )
        _UpperCAmelCase = os.path.join(snake_case_ , "data" )
        self._create_dummy_data(data_dir=snake_case_ )
        _UpperCAmelCase = f'\n --data_dir {data_dir} \\n --output_dir {output_dir} \\n --model_name_or_path facebook/rag-sequence-base \\n --model_type rag_sequence \\n --do_train \\n --do_predict \\n --n_val -1 \\n --val_check_interval 1.0 \\n --train_batch_size 2 \\n --eval_batch_size 1 \\n --max_source_length 25 \\n --max_target_length 25 \\n --val_max_target_length 25 \\n --test_max_target_length 25 \\n --label_smoothing 0.1 \\n --dropout 0.1 \\n --attention_dropout 0.1 \\n --weight_decay 0.001 \\n --adam_epsilon 1e-08 \\n --max_grad_norm 0.1 \\n --lr_scheduler polynomial \\n --learning_rate 3e-04 \\n --num_train_epochs 1 \\n --warmup_steps 4 \\n --gradient_accumulation_steps 1 \\n --distributed-port 8787 \\n --use_dummy_dataset 1 \\n --distributed_retriever {distributed_retriever} \\n '.split()
        if gpus > 0:
            testargs.append(f'--gpus={gpus}' )
            if is_apex_available():
                testargs.append("--fp16" )
        else:
            # CPU fall-back: 2-process DDP on CPU.
            testargs.append("--gpus=0" )
            testargs.append("--distributed_backend=ddp_cpu" )
            testargs.append("--num_processes=2" )
        _UpperCAmelCase = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
        execute_subprocess_async(snake_case_ , env=self.get_env() )
        _UpperCAmelCase = os.path.join(snake_case_ , "metrics.json" )
        with open(snake_case_ ) as f:
            _UpperCAmelCase = json.load(snake_case_ )
        return result

    @require_torch_gpu
    def lowercase ( self : Tuple ):
        """Single-GPU run must reach a minimum exact-match score."""
        _UpperCAmelCase = self._run_finetune(gpus=1 )
        self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )

    @require_torch_multi_gpu
    def lowercase ( self : List[Any] ):
        """Multi-GPU run must reach a minimum exact-match score."""
        _UpperCAmelCase = self._run_finetune(gpus=2 )
        self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )

    @require_torch_gpu
    @require_ray
    def lowercase ( self : Tuple ):
        """Single-GPU run with the ray distributed retriever."""
        _UpperCAmelCase = self._run_finetune(gpus=1 , distributed_retriever="ray" )
        self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )

    @require_torch_multi_gpu
    @require_ray
    def lowercase ( self : Optional[int] ):
        # NOTE(review): despite the multi-gpu decorator this passes gpus=1,
        # same as the test above — possibly a mangling artifact; confirm.
        _UpperCAmelCase = self._run_finetune(gpus=1 , distributed_retriever="ray" )
        self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
| 119
| 1
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
UpperCAmelCase_ : Tuple = logging.get_logger(__name__)
class __A ( UpperCamelCase__ ):
    """Configuration for a UPerNet semantic-segmentation model.

    Holds the backbone sub-config plus decode-head / auxiliary-head
    hyper-parameters. NOTE(review): the base name ``UpperCamelCase__`` is
    unresolved in this mangled file (presumably ``PretrainedConfig`` — confirm
    before use).

    Fixes obfuscation defects: every ``__init__`` parameter was named
    ``__snake_case`` (a SyntaxError) and all attribute assignments were
    replaced by rebinding the local ``__magic_name__``, so no state was stored.
    """

    # Read below via `self.__class__.model_type`; the mangled source had this
    # class attribute renamed to `UpperCamelCase`.
    model_type = """upernet"""

    def __init__(
        self,
        backbone_config=None,
        hidden_size=5_12,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=3_84,
        auxiliary_channels=2_56,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=2_55,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
            backbone_config = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
        elif isinstance(backbone_config, dict):
            # Re-hydrate a plain-dict backbone config into its concrete class.
            backbone_model_type = backbone_config.get("""model_type""" )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def A__ ( self ):
        """Serialize to a plain dict, nesting the backbone config as a dict."""
        output = copy.deepcopy(self.__dict__)
        output["""backbone_config"""] = self.backbone_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
| 21
|
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def UpperCamelCase ( model_name ):
    """Build a VideoMAEConfig for the checkpoint named ``model_name``.

    Fine-tuned checkpoints additionally get ``num_labels`` / ``id2label`` /
    ``label2id`` pulled from the hub label files; which file is decided by
    whether "kinetics" or "ssv2" appears in the name.

    Fixes obfuscation defects: the parameter was named ``__lowerCamelCase``
    while the body read ``model_name``/``idalabel``, and every binding was
    renamed to ``snake_case`` so the config was never populated.
    """
    config = VideoMAEConfig()
    set_architecture_configs(model_name, config)
    if "finetuned" not in model_name:
        # Pre-training checkpoints: NOTE(review) the mangled source only shows
        # `... = False`; `use_mean_pooling` is reconstructed — confirm against
        # the original conversion script.
        config.use_mean_pooling = False
    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned." )
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    return config
def UpperCamelCase ( model_name , config ):
    """Set encoder/decoder architecture hyper-parameters on ``config`` for the
    "small" / "large" / "huge" VideoMAE variants; "base" keeps the defaults.

    Fixes obfuscation defects: both parameters were named ``__lowerCamelCase``
    (a SyntaxError) and every ``config.<attr> = ...`` assignment had been
    replaced by rebinding the local ``snake_case``, so nothing was configured.
    The attribute names below are reconstructed from the value order —
    NOTE(review): confirm against the original conversion script.

    Raises:
        ValueError: if the name matches none of small/base/large/huge.
    """
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError("Model name should include either \"small\", \"base\", \"large\", or \"huge\"" )
def UpperCamelCase ( name ):
    """Map one original VideoMAE checkpoint key to its HF-layout key.

    Applies a chain of substring replacements; order matters (e.g. the
    ``decoder.blocks`` rewrite must run before the generic ``blocks`` one, and
    the ``norm.weight``/``norm.bias``/``head`` rules are guarded against
    decoder keys).

    Fixes an obfuscation defect: the parameter was named ``__lowerCamelCase``
    while the body read ``name``, and every rebinding was renamed to
    ``snake_case``, so the replacements never took effect.
    """
    if "encoder." in name:
        name = name.replace("encoder." , "" )
    if "cls_token" in name:
        name = name.replace("cls_token" , "videomae.embeddings.cls_token" )
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed" , "decoder.decoder_pos_embed" )
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed" , "videomae.embeddings.position_embeddings" )
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj" , "videomae.embeddings.patch_embeddings.projection" )
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm" , "videomae.embeddings.norm" )
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks" , "decoder.decoder_layers" )
    if "blocks" in name:
        name = name.replace("blocks" , "videomae.encoder.layer" )
    if "attn.proj" in name:
        name = name.replace("attn.proj" , "attention.output.dense" )
    if "attn" in name and "bias" not in name:
        name = name.replace("attn" , "attention.self" )
    if "attn" in name:
        name = name.replace("attn" , "attention.attention" )
    if "norm1" in name:
        name = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name:
        name = name.replace("norm2" , "layernorm_after" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2" , "output.dense" )
    if "decoder_embed" in name:
        name = name.replace("decoder_embed" , "decoder.decoder_embed" )
    if "decoder_norm" in name:
        name = name.replace("decoder_norm" , "decoder.decoder_norm" )
    if "decoder_pred" in name:
        name = name.replace("decoder_pred" , "decoder.decoder_pred" )
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight" , "videomae.layernorm.weight" )
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias" , "videomae.layernorm.bias" )
    if "head" in name and "decoder" not in name:
        name = name.replace("head" , "classifier" )
    return name
def UpperCamelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any ):
    """Split fused qkv weights/biases of a VideoMAE checkpoint into q/k/v tensors.

    NOTE(review): as written this block cannot run — the signature declares the
    same parameter name twice (a SyntaxError) and the body reads names
    (`orig_state_dict`, `config`, `key_split`, `val`, `dim`) that are never bound,
    while every computed slice is assigned to the throwaway local `snake_case`
    instead of being stored back into the state dict.  Confirm the intended
    variable wiring against the upstream VideoMAE conversion script.
    """
    for key in orig_state_dict.copy().keys():
        # Pop every entry so renamed keys can be re-inserted under new names.
        snake_case : Optional[Any] = orig_state_dict.pop(__lowerCamelCase )
        if key.startswith("encoder." ):
            # Drop the "encoder." prefix used by the original pretraining checkpoints.
            snake_case : Optional[Any] = key.replace("encoder." , "" )
        if "qkv" in key:
            snake_case : Tuple = key.split("." )
            if key.startswith("decoder.blocks" ):
                # Decoder layers use the (smaller) decoder hidden size.
                snake_case : str = config.decoder_hidden_size
                snake_case : Optional[Any] = int(key_split[2] )
                snake_case : Tuple = "decoder.decoder_layers."
                if "weight" in key:
                    # Fused qkv weight rows are stacked [query; key; value], each `dim` wide.
                    snake_case : Optional[Any] = val[:dim, :]
                    snake_case : List[str] = val[dim : dim * 2, :]
                    snake_case : str = val[-dim:, :]
            else:
                # Encoder layers use the main hidden size.
                snake_case : str = config.hidden_size
                snake_case : str = int(key_split[1] )
                snake_case : Union[str, Any] = "videomae.encoder.layer."
                if "weight" in key:
                    snake_case : int = val[:dim, :]
                    snake_case : int = val[dim : dim * 2, :]
                    snake_case : Union[str, Any] = val[-dim:, :]
        else:
            # Non-qkv entries are kept as-is (presumably re-inserted under a renamed key).
            snake_case : Optional[Any] = val
    return orig_state_dict
def UpperCamelCase ( ):
    """Download the "eating spaghetti" sample clip from the HF Hub and return its
    frames as a list of numpy arrays (one array per frame).

    Fix: the original called ``np.load(__lowerCamelCase)``, a name that does not
    exist in this zero-argument function; we load the file that was just
    downloaded instead.
    """
    file_path = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
    video = np.load(file_path)
    return list(video)
def UpperCamelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any ):
    """Convert an original VideoMAE checkpoint to the HF format, verify its outputs
    against hard-coded expected logits, and optionally save / push the result.

    NOTE(review): this block cannot run as written — the signature repeats one
    parameter name four times (a SyntaxError) and the body reads many names
    (`model_name`, `config`, `model`, `files`, `outputs`, `expected_shape`,
    `expected_slice`, `expected_loss`, `model_names`, `image_processor`,
    `pytorch_dump_folder_path`, `push_to_hub`) that are never bound, while every
    computed value is assigned to the throwaway local `snake_case`.  Compare with
    the upstream VideoMAE conversion script before use.
    """
    snake_case : List[str] = get_videomae_config(__lowerCamelCase )
    if "finetuned" in model_name:
        # Fine-tuned checkpoints become classifiers; the rest stay pretraining models.
        snake_case : List[Any] = VideoMAEForVideoClassification(__lowerCamelCase )
    else:
        snake_case : List[str] = VideoMAEForPreTraining(__lowerCamelCase )
    # download original checkpoint, hosted on Google Drive
    snake_case : List[Any] = "pytorch_model.bin"
    gdown.cached_download(__lowerCamelCase , __lowerCamelCase , quiet=__lowerCamelCase )
    snake_case : str = torch.load(__lowerCamelCase , map_location="cpu" )
    if "model" in files:
        snake_case : List[str] = files["model"]
    else:
        snake_case : List[str] = files["module"]
    snake_case : List[str] = convert_state_dict(__lowerCamelCase , __lowerCamelCase )
    model.load_state_dict(__lowerCamelCase )
    model.eval()
    # verify model on basic input
    snake_case : Tuple = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
    snake_case : Dict = prepare_video()
    snake_case : Dict = image_processor(__lowerCamelCase , return_tensors="pt" )
    if "finetuned" not in model_name:
        # Pretraining checkpoints additionally need the fixture boolean mask.
        snake_case : List[Any] = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" )
        snake_case : Union[str, Any] = torch.load(__lowerCamelCase )
    snake_case : Any = model(**__lowerCamelCase )
    snake_case : List[Any] = outputs.logits
    # Names this converter claims to support (used in the error message below).
    snake_case : Any = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]
    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        snake_case : List[str] = torch.Size([1, 400] )
        snake_case : List[str] = torch.tensor([-0.9291, -0.4061, -0.9307] )
    elif model_name == "videomae-small-finetuned-ssv2":
        snake_case : Dict = torch.Size([1, 174] )
        snake_case : Tuple = torch.tensor([0.2671, -0.4689, -0.8235] )
    elif model_name == "videomae-base":
        snake_case : Union[str, Any] = torch.Size([1, 1408, 1536] )
        snake_case : Union[str, Any] = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
    elif model_name == "videomae-base-short":
        snake_case : str = torch.Size([1, 1408, 1536] )
        snake_case : List[Any] = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
        # we verified the loss both for normalized and unnormalized targets for this one
        snake_case : int = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
    elif model_name == "videomae-large":
        snake_case : Optional[int] = torch.Size([1, 1408, 1536] )
        snake_case : Tuple = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
    elif model_name == "videomae-large-finetuned-kinetics":
        snake_case : Dict = torch.Size([1, 400] )
        snake_case : Any = torch.tensor([0.0771, 0.0011, -0.3625] )
    elif model_name == "videomae-huge-finetuned-kinetics":
        snake_case : str = torch.Size([1, 400] )
        snake_case : List[str] = torch.tensor([0.2433, 0.1632, -0.4894] )
    elif model_name == "videomae-base-short-finetuned-kinetics":
        snake_case : int = torch.Size([1, 400] )
        snake_case : Tuple = torch.tensor([0.6588, 0.0990, -0.2493] )
    elif model_name == "videomae-base-finetuned-kinetics":
        snake_case : Optional[Any] = torch.Size([1, 400] )
        snake_case : Dict = torch.tensor([0.3669, -0.0688, -0.2421] )
    elif model_name == "videomae-base-short-ssv2":
        snake_case : List[Any] = torch.Size([1, 1408, 1536] )
        snake_case : Union[str, Any] = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
    elif model_name == "videomae-base-short-finetuned-ssv2":
        snake_case : Any = torch.Size([1, 174] )
        snake_case : str = torch.tensor([-0.0537, -0.1539, -0.3266] )
    elif model_name == "videomae-base-ssv2":
        snake_case : Dict = torch.Size([1, 1408, 1536] )
        snake_case : List[Any] = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
    elif model_name == "videomae-base-finetuned-ssv2":
        snake_case : List[Any] = torch.Size([1, 174] )
        snake_case : Optional[int] = torch.tensor([0.1961, -0.8337, -0.6389] )
    else:
        raise ValueError(f"""Model name not supported. Should be one of {model_names}""" )
    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        # Classifier heads: compare the first three class logits.
        assert torch.allclose(logits[0, :3] , __lowerCamelCase , atol=1E-4 )
    else:
        # Pretraining heads: compare a 3x3 patch of the reconstruction logits.
        print("Logits:" , logits[0, :3, :3] )
        assert torch.allclose(logits[0, :3, :3] , __lowerCamelCase , atol=1E-4 )
    print("Logits ok!" )
    # verify loss, if applicable
    if model_name == "videomae-base-short":
        snake_case : List[Any] = outputs.loss
        assert torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1E-4 )
        print("Loss ok!" )
    if pytorch_dump_folder_path is not None:
        print(f"""Saving model and image processor to {pytorch_dump_folder_path}""" )
        image_processor.save_pretrained(__lowerCamelCase )
        model.save_pretrained(__lowerCamelCase )
    if push_to_hub:
        print("Pushing to the hub..." )
        model.push_to_hub(__lowerCamelCase , organization="nielsr" )
if __name__ == "__main__":
    # CLI entry point for the VideoMAE conversion script.
    # NOTE(review): the parser/args objects are bound to `__lowerCamelCase` yet used
    # as `parser` / `args` below — the names were garbled; confirm against the
    # upstream script.
    __lowerCamelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--checkpoint_url""",
        default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
        type=str,
        help=(
            """URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
            """ download link."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
        type=str,
        help="""Path to the output PyTorch model directory.""",
    )
    parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
    )
    __lowerCamelCase = parser.parse_args()
    convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 204
| 0
|
import os


def __UpperCamelCase(triangle_path=None) -> int:
    """Project Euler 67: return the maximum top-to-bottom path sum of a triangle.

    The triangle is read from ``triangle_path`` (one row per line, numbers
    separated by single spaces); when omitted it defaults to ``triangle.txt``
    next to this script.  Fixes the original block, which referenced the
    undefined names ``a``, ``a_`` and ``solution``.
    """
    if triangle_path is None:
        script_dir = os.path.dirname(os.path.realpath(__file__))
        triangle_path = os.path.join(script_dir, "triangle.txt")
    with open(triangle_path) as f:
        lines = f.readlines()
    # Parse each row into a list of ints.
    rows = [[int(token) for token in line.strip().split(" ")] for line in lines]
    # Dynamic programming: fold each cell's best predecessor into it, row by row.
    for i in range(1, len(rows)):
        for j in range(len(rows[i])):
            # The last cell of a row has no neighbour directly above; treat it as 0.
            above = rows[i - 1][j] if j != len(rows[i - 1]) else 0
            above_left = rows[i - 1][j - 1] if j > 0 else 0
            rows[i][j] += max(above, above_left)
    return max(rows[-1])


# Readable alias used by the __main__ guard below (and available to callers).
solution = __UpperCamelCase

if __name__ == "__main__":
    print(solution())
| 712
|
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _SCREAMING_SNAKE_CASE :
    """Builds small ConvNext-backboned UperNet configs and inputs for unit tests.

    NOTE(review): the obfuscated body assigns every value to the throwaway local
    `lowerCamelCase_` instead of `self.<attr>`, all methods share the name
    `_snake_case` (later defs override earlier ones), and several names read below
    (`parent`, `batch_size`, `self.num_channels`, `config_and_inputs`, ...) are
    never bound here — confirm against the upstream `UperNetModelTester`.
    """

    def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=[10, 20, 30, 40] , _SCREAMING_SNAKE_CASE=[2, 2, 3, 2] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=["stage2", "stage3", "stage4"] , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=None , )-> Tuple:
        # Store the test hyper-parameters for later config/input construction.
        lowerCamelCase_ =parent
        lowerCamelCase_ =batch_size
        lowerCamelCase_ =image_size
        lowerCamelCase_ =num_channels
        lowerCamelCase_ =num_stages
        lowerCamelCase_ =hidden_sizes
        lowerCamelCase_ =depths
        lowerCamelCase_ =is_training
        lowerCamelCase_ =use_labels
        lowerCamelCase_ =intermediate_size
        lowerCamelCase_ =hidden_act
        lowerCamelCase_ =type_sequence_label_size
        lowerCamelCase_ =initializer_range
        lowerCamelCase_ =out_features
        lowerCamelCase_ =num_labels
        lowerCamelCase_ =scope
        lowerCamelCase_ =num_stages

    def _snake_case ( self )-> Union[str, Any]:
        # Build a random pixel_values batch and (optionally) random labels.
        lowerCamelCase_ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCamelCase_ =None
        if self.use_labels:
            lowerCamelCase_ =ids_tensor([self.batch_size] , self.type_sequence_label_size )
        lowerCamelCase_ =self.get_config()
        return config, pixel_values, labels

    def _snake_case ( self )-> List[Any]:
        # ConvNext backbone config mirroring the stored hyper-parameters.
        return ConvNextConfig(
            num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )

    def _snake_case ( self )-> Union[str, Any]:
        # Full UperNet config wrapping the backbone config above.
        return UperNetConfig(
            backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=_SCREAMING_SNAKE_CASE , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=_SCREAMING_SNAKE_CASE , loss_ignore_index=255 , num_labels=self.num_labels , )

    def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Optional[Any]:
        # Forward pass and shape check of the segmentation logits.
        lowerCamelCase_ =UperNetForSemanticSegmentation(config=_SCREAMING_SNAKE_CASE )
        model.to(_SCREAMING_SNAKE_CASE )
        model.eval()
        lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )

    def _snake_case ( self )-> str:
        # Common-test helper: config plus the inputs dict fed to the model.
        lowerCamelCase_ =self.prepare_config_and_inputs()
        # NOTE(review): this unpacking binds all three tuple members to the same
        # name, keeping only the last — the original names were garbled.
        (
            (
                lowerCamelCase_
            ) , (
                lowerCamelCase_
            ) , (
                lowerCamelCase_
            ) ,
        ) =config_and_inputs
        lowerCamelCase_ ={"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase):
    """Model-level unit tests for UperNetForSemanticSegmentation.

    NOTE(review): the base classes are the undefined placeholder
    `lowerCAmelCase__` (presumably the ModelTesterMixin / PipelineTesterMixin),
    all class attributes share the name `_UpperCamelCase` (later assignments
    override earlier ones) and all methods share the name `_snake_case` — the
    original identifiers were garbled; confirm against the upstream test file.
    """

    _UpperCamelCase:Optional[Any] = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    _UpperCamelCase:Any = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    _UpperCamelCase:Optional[Any] = False
    _UpperCamelCase:Dict = False
    _UpperCamelCase:int = False
    _UpperCamelCase:Any = False
    _UpperCamelCase:Optional[Any] = False
    _UpperCamelCase:Optional[Any] = False

    def _snake_case ( self )-> int:
        # Set up the model tester and a config tester for UperNetConfig.
        lowerCamelCase_ =UperNetModelTester(self )
        lowerCamelCase_ =ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 )

    def _snake_case ( self )-> Union[str, Any]:
        # Run the full battery of config round-trip checks.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def _snake_case ( self )-> Tuple:
        return

    def _snake_case ( self )-> Union[str, Any]:
        # The forward signature must start with `pixel_values`.
        lowerCamelCase_ , lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase_ =model_class(_SCREAMING_SNAKE_CASE )
            lowerCamelCase_ =inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase_ =[*signature.parameters.keys()]
            lowerCamelCase_ =["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )

    def _snake_case ( self )-> Tuple:
        lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*_SCREAMING_SNAKE_CASE )

    @unittest.skip(reason="""UperNet does not use inputs_embeds""" )
    def _snake_case ( self )-> str:
        pass

    @unittest.skip(reason="""UperNet does not support input and output embeddings""" )
    def _snake_case ( self )-> str:
        pass

    @unittest.skip(reason="""UperNet does not have a base model""" )
    def _snake_case ( self )-> Optional[Any]:
        pass

    @unittest.skip(reason="""UperNet does not have a base model""" )
    def _snake_case ( self )-> Optional[Any]:
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
    def _snake_case ( self )-> List[Any]:
        pass

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def _snake_case ( self )-> str:
        pass

    def _snake_case ( self )-> Optional[int]:
        # hidden_states must have one entry per stage (+ stem) with the expected spatial size.
        def check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
            lowerCamelCase_ =model_class(_SCREAMING_SNAKE_CASE )
            model.to(_SCREAMING_SNAKE_CASE )
            model.eval()
            with torch.no_grad():
                lowerCamelCase_ =model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
            lowerCamelCase_ =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            lowerCamelCase_ =self.model_tester.num_stages
            self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        lowerCamelCase_ , lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase_ =True
            check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase_ =True
            check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

    def _snake_case ( self )-> Union[str, Any]:
        # Weights under a zero-init config must come out exactly 0 or 1.
        lowerCamelCase_ , lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase_ =_config_zero_init(_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ =_config_zero_init(configs_no_init.backbone_config )
        for model_class in self.all_model_classes:
            lowerCamelCase_ =model_class(config=_SCREAMING_SNAKE_CASE )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )

    @unittest.skip(reason="""UperNet does not have tied weights""" )
    def _snake_case ( self )-> Dict:
        pass

    @slow
    def _snake_case ( self )-> Tuple:
        # Smoke-test loading the first published checkpoint.
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase_ =UperNetForSemanticSegmentation.from_pretrained(_SCREAMING_SNAKE_CASE )
            self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( ) ->Tuple:
    """Download the ADE20k fixture image from the HF Hub and return it as an RGB
    PIL image.

    Fix: the original opened ``_A``, a name that is never bound in this
    zero-argument function; we open the file that was just downloaded instead.
    """
    filepath = hf_hub_download(
        repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" )
    image = Image.open(filepath ).convert("""RGB""" )
    return image
@require_torch
@require_vision
@slow
class _SCREAMING_SNAKE_CASE ( unittest.TestCase):
    """Slow integration tests: run published UperNet checkpoints on the ADE20k
    fixture image and compare a 3x3 logits patch against hard-coded values.

    NOTE(review): locals are bound to the placeholder `lowerCamelCase_` yet read
    as `processor` / `model` / `image` / `inputs` / `outputs` — the original
    names were garbled; both methods also share the name `_snake_case`.
    """

    def _snake_case ( self )-> List[Any]:
        # Swin-tiny backbone checkpoint.
        lowerCamelCase_ =AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" )
        lowerCamelCase_ =UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ =prepare_img()
        lowerCamelCase_ =processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(_SCREAMING_SNAKE_CASE )
        with torch.no_grad():
            lowerCamelCase_ =model(**_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ =torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
        lowerCamelCase_ =torch.tensor(
            [[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] ).to(_SCREAMING_SNAKE_CASE )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )

    def _snake_case ( self )-> int:
        # ConvNext-tiny backbone checkpoint.
        lowerCamelCase_ =AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" )
        lowerCamelCase_ =UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ =prepare_img()
        lowerCamelCase_ =processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(_SCREAMING_SNAKE_CASE )
        with torch.no_grad():
            lowerCamelCase_ =model(**_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ =torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
        lowerCamelCase_ =torch.tensor(
            [[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] ).to(_SCREAMING_SNAKE_CASE )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 75
| 0
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
_snake_case : Dict = ['transformers', 'torch', 'note_seq']
def __init__( self : Tuple , *lowerCAmelCase__ : List[str] , **lowerCAmelCase__ : Optional[int] ) -> int:
'''simple docstring'''
requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def snake_case__ ( cls : Dict , *lowerCAmelCase__ : Optional[Any] , **lowerCAmelCase__ : Optional[int] ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def snake_case__ ( cls : str , *lowerCAmelCase__ : Any , **lowerCAmelCase__ : List[Any] ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
| 98
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    # Train an LSTM forecaster on a univariate series from sample_data.csv.
    # NOTE(review): every value is bound to the placeholder `snake_case__` yet read
    # back under real names (`df`, `actual_data`, `train_data`, `x_train`, ...) —
    # the original variable names were garbled; confirm against the upstream script.
    snake_case__ : Tuple = pd.read_csv('''sample_data.csv''', header=None)
    snake_case__ : List[str] = df.shape[:1][0]
    # If you're using some other dataset input the target column
    snake_case__ : Dict = df.iloc[:, 1:2]
    snake_case__ : List[str] = actual_data.values.reshape(len_data, 1)
    # Scale values into [0, 1] before feeding the network.
    snake_case__ : Union[str, Any] = MinMaxScaler().fit_transform(actual_data)
    snake_case__ : Tuple = 1_0
    snake_case__ : str = 5
    snake_case__ : Any = 2_0
    # Hold out the last `periods * look_back` points for testing.
    snake_case__ : Union[str, Any] = len_data - periods * look_back
    snake_case__ : Union[str, Any] = actual_data[:division]
    snake_case__ : Optional[Any] = actual_data[division - look_back :]
    snake_case__ , snake_case__ : Dict = [], []
    snake_case__ , snake_case__ : Dict = [], []
    # Build sliding windows: `look_back` inputs predicting `forward_days` outputs.
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    snake_case__ : int = np.array(train_x)
    snake_case__ : List[str] = np.array(test_x)
    snake_case__ : Optional[Any] = np.array([list(i.ravel()) for i in train_y])
    snake_case__ : int = np.array([list(i.ravel()) for i in test_y])
    # Two stacked LSTM layers followed by a dense multi-step output head.
    # NOTE(review): input_shape=(1_2_8, 1) on the second LSTM looks inconsistent
    # with the first layer's output — TODO confirm against the original.
    snake_case__ : List[Any] = Sequential()
    model.add(LSTM(1_2_8, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(6_4, input_shape=(1_2_8, 1)))
    model.add(Dense(forward_days))
    model.compile(loss='''mean_squared_error''', optimizer='''adam''')
    snake_case__ : List[str] = model.fit(
        x_train, y_train, epochs=1_5_0, verbose=1, shuffle=True, batch_size=4
    )
    snake_case__ : Optional[int] = model.predict(x_test)
| 392
| 0
|
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def lowerCamelCase ( df , partition_order ):
    '''Collect (row_id, row_dict) pairs for each Spark partition id, in order.

    Fixes the original signature, which declared the same parameter name twice
    (a SyntaxError) and left ``df`` / ``partition_order`` unbound.  Row ids have
    the form "<partition>_<index within partition>".
    '''
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        # Collect only the rows that live in this physical partition.
        partition = df.where(f"""SPARK_PARTITION_ID() = {part_id}""" ).collect()
        for row_idx, row in enumerate(partition ):
            expected_row_ids_and_row_dicts.append((f"""{part_id}_{row_idx}""", row.asDict()) )
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase ( ):
    '''Check that _repartition_df_if_needed splits 100 int64 rows into 50 shards.

    NOTE(review): the body reads `spark` and `spark_builder`, and passes the
    unbound name `SCREAMING_SNAKE_CASE` to `Spark(...)` — the original locals
    were garbled; confirm against the upstream test.
    '''
    __UpperCamelCase :Union[str, Any] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    __UpperCamelCase :List[Any] = spark.range(100 ).repartition(1 )
    __UpperCamelCase :Any = Spark(SCREAMING_SNAKE_CASE )
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16 )
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase ( ):
    '''Check _generate_iterable_examples yields rows in the requested partition order.

    NOTE(review): several names read below (`spark`, `generate_fn`,
    `expected_row_ids_and_row_dicts`, the callable
    `_get_expected_row_ids_and_row_dicts_for_partition_order`, and the argument
    `SCREAMING_SNAKE_CASE`) are never bound under those names in this garbled
    module — confirm against the upstream test.
    '''
    __UpperCamelCase :Optional[Any] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    __UpperCamelCase :Union[str, Any] = spark.range(10 ).repartition(2 )
    __UpperCamelCase :Optional[Any] = [1, 0]
    __UpperCamelCase :Any = _generate_iterable_examples(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )  # Reverse the partitions.
    __UpperCamelCase :Optional[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
    for i, (row_id, row_dict) in enumerate(generate_fn() ):
        __UpperCamelCase :int = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase ( ):
    '''Check SparkExamplesIterable over a single partition yields "0_i" row ids.

    NOTE(review): `spark`, `it` and the enumerate argument `SCREAMING_SNAKE_CASE`
    are never bound under those names here — the original locals were garbled.
    '''
    __UpperCamelCase :Optional[int] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    __UpperCamelCase :str = spark.range(10 ).repartition(1 )
    __UpperCamelCase :Tuple = SparkExamplesIterable(SCREAMING_SNAKE_CASE )
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(SCREAMING_SNAKE_CASE ):
        assert row_id == f"""0_{i}"""
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase ( ):
    '''Check shuffle_data_sources reverses partition order when the RNG is mocked.

    NOTE(review): `spark`, `shuffled_it`, `expected_row_ids_and_row_dicts` and the
    `SCREAMING_SNAKE_CASE` arguments are never bound under those names here — the
    original locals were garbled; confirm against the upstream test.
    '''
    __UpperCamelCase :str = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    __UpperCamelCase :Union[str, Any] = spark.range(30 ).repartition(3 )
    # Mock the generator so that shuffle reverses the partition indices.
    with patch('''numpy.random.Generator''' ) as generator_mock:
        __UpperCamelCase :int = lambda SCREAMING_SNAKE_CASE : x.reverse()
        __UpperCamelCase :List[str] = _get_expected_row_ids_and_row_dicts_for_partition_order(SCREAMING_SNAKE_CASE , [2, 1, 0] )
        __UpperCamelCase :Union[str, Any] = SparkExamplesIterable(SCREAMING_SNAKE_CASE ).shuffle_data_sources(SCREAMING_SNAKE_CASE )
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(SCREAMING_SNAKE_CASE ):
            __UpperCamelCase :str = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase ( ):
    '''Check shard_data_sources splits 4 partitions into workers {0,2} and {1,3}.

    NOTE(review): `spark`, `shard_it_a`, the expected-dict locals and the
    `SCREAMING_SNAKE_CASE` arguments are never bound under those names here — the
    original locals were garbled; confirm against the upstream test.
    '''
    __UpperCamelCase :Dict = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    __UpperCamelCase :List[str] = spark.range(20 ).repartition(4 )
    # Partitions 0 and 2
    __UpperCamelCase :Union[str, Any] = SparkExamplesIterable(SCREAMING_SNAKE_CASE ).shard_data_sources(worker_id=0 , num_workers=2 )
    assert shard_it_a.n_shards == 2
    __UpperCamelCase :Optional[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(SCREAMING_SNAKE_CASE , [0, 2] )
    for i, (row_id, row_dict) in enumerate(SCREAMING_SNAKE_CASE ):
        __UpperCamelCase :int = expected_row_ids_and_row_dicts_a[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    __UpperCamelCase :Optional[Any] = SparkExamplesIterable(SCREAMING_SNAKE_CASE ).shard_data_sources(worker_id=1 , num_workers=2 )
    assert shard_it_a.n_shards == 2
    __UpperCamelCase :Tuple = _get_expected_row_ids_and_row_dicts_for_partition_order(SCREAMING_SNAKE_CASE , [1, 3] )
    for i, (row_id, row_dict) in enumerate(SCREAMING_SNAKE_CASE ):
        __UpperCamelCase :str = expected_row_ids_and_row_dicts_a[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase ( ):
    '''Check repartitioning with max_shard_size=1 is capped at one row per shard.

    NOTE(review): `spark` and `spark_builder`, and the `Spark(...)` argument
    `SCREAMING_SNAKE_CASE`, are never bound under those names here — the original
    locals were garbled; confirm against the upstream test.
    '''
    __UpperCamelCase :Optional[Any] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    __UpperCamelCase :List[Any] = spark.range(100 ).repartition(1 )
    __UpperCamelCase :List[Any] = Spark(SCREAMING_SNAKE_CASE )
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1 )
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 712
|
from __future__ import annotations
import bisect
def lowerCamelCase ( sorted_collection , item , lo = 0 , hi = -1 ):
    """Hand-rolled bisect_left: return the leftmost index at which ``item`` can be
    inserted into ``sorted_collection`` while keeping it sorted.

    A negative ``hi`` means "search up to the end of the collection".  Fixes the
    original signature, which repeated one parameter name four times (a
    SyntaxError) and left every local unbound.
    """
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        # Midpoint written to avoid (lo + hi) overflow in languages without bigints.
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def lowerCamelCase ( sorted_collection , item , lo = 0 , hi = -1 ):
    """Hand-rolled bisect_right: return the rightmost index at which ``item`` can
    be inserted into ``sorted_collection`` while keeping it sorted.

    A negative ``hi`` means "search up to the end of the collection".  Fixes the
    original signature, which repeated one parameter name four times (a
    SyntaxError) and left every local unbound.
    """
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        # <= (vs < in bisect_left) puts equal items to the left of the result.
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def lowerCamelCase ( sorted_collection , item , lo = 0 , hi = -1 ):
    """Insert ``item`` into ``sorted_collection`` in sorted order, before any
    equal items (insort_left).

    A negative ``hi`` means "consider the whole collection".  Fixes the original,
    which repeated one parameter name four times (a SyntaxError) and called the
    undefined name ``bisect_left``; the stdlib ``bisect`` module (imported at the
    top of this file) is used instead.
    """
    if hi < 0:
        hi = len(sorted_collection )
    sorted_collection.insert(bisect.bisect_left(sorted_collection , item , lo , hi ) , item )
def lowerCamelCase ( sorted_collection , item , lo = 0 , hi = -1 ):
    """Insert ``item`` into ``sorted_collection`` in sorted order, after any
    equal items (insort_right).

    A negative ``hi`` means "consider the whole collection".  Fixes the original,
    which repeated one parameter name four times (a SyntaxError) and called the
    undefined name ``bisect_right``; the stdlib ``bisect`` module (imported at
    the top of this file) is used instead.
    """
    if hi < 0:
        hi = len(sorted_collection )
    sorted_collection.insert(bisect.bisect_right(sorted_collection , item , lo , hi ) , item )
def lowerCamelCase ( sorted_collection , item ):
    """Iterative binary search: return the index of ``item`` in the sorted list
    ``sorted_collection``, or ``None`` when it is absent.

    Fixes the original signature, which declared the same parameter name twice
    (a SyntaxError) and left every local (``left``, ``right``, ``midpoint``,
    ``current_item``) unbound.
    """
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def lowerCamelCase ( sorted_collection , item ):
    """Binary search via the stdlib: return the index of ``item`` in the sorted
    list ``sorted_collection``, or ``None`` when it is absent.

    Fixes the original signature, which declared the same parameter name twice
    (a SyntaxError) and left ``index`` unbound.
    """
    index = bisect.bisect_left(sorted_collection , item )
    # bisect_left returns the insertion point; it is a hit only if an equal
    # element actually sits there.
    if index != len(sorted_collection ) and sorted_collection[index] == item:
        return index
    return None
def lowerCamelCase ( sorted_collection , item , left , right ):
    """Recursive binary search on ``sorted_collection[left:right + 1]``: return the
    index of ``item`` or ``None`` when it is absent.

    Fixes the original signature, which repeated one parameter name four times
    (a SyntaxError), left every local unbound, and recursed through the
    undefined name ``binary_search_by_recursion``.
    """
    if right < left:
        # Empty search window: item not present.
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return lowerCamelCase(sorted_collection , item , left , midpoint - 1 )
    else:
        return lowerCamelCase(sorted_collection , item , midpoint + 1 , right )
if __name__ == "__main__":
    # Interactive demo: read a sorted list and a target, report its position.
    # NOTE(review): `binary_search` is not defined under that name in this garbled
    # module (all defs above were renamed), and the parsed values are bound to the
    # placeholder `__lowercase` yet read as `user_input` / `collection` / `target`
    # / `result` — confirm the intended wiring against the upstream script.
    __lowercase = input('''Enter numbers separated by comma:\n''').strip()
    __lowercase = sorted(int(item) for item in user_input.split(''','''))
    __lowercase = int(input('''Enter a single number to be found in the list:\n'''))
    __lowercase = binary_search(collection, target)
    if result is None:
        print(F'{target} was not found in {collection}.')
    else:
        print(F'{target} was found at position {result} in {collection}.')
| 452
| 0
|
"""simple docstring"""
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger.
# NOTE(review): both assignments bind the placeholder `_a`, so the second (the
# checkpoint -> config-url map) overwrites the logger — the original names were
# garbled; confirm against the upstream configuration module.
_a = logging.get_logger(__name__)
_a = {
    """microsoft/xprophetnet-large-wiki100-cased""": (
        """https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"""
    ),
}
class _UpperCAmelCase( lowerCamelCase ):
    """Configuration for the XLM-ProphetNet model.

    NOTE(review): the three class attributes all bind the placeholder
    `lowercase__` (later assignments override earlier ones), the `__init__`
    signature repeats `__a` for every parameter (a SyntaxError), and every value
    is assigned to the local `_UpperCamelCase` rather than a `self.` attribute —
    the original identifiers were garbled; confirm against the upstream
    `XLMProphetNetConfig`.
    """

    lowercase__ = 'xlm-prophetnet'
    lowercase__ = ['past_key_values']
    lowercase__ = {
        'num_attention_heads': 'num_encoder_attention_heads',
    }

    def __init__( self , __a = 0.1 , __a = "gelu" , __a = 3_05_22 , __a = 10_24 , __a = 40_96 , __a = 12 , __a = 16 , __a = 40_96 , __a = 12 , __a = 16 , __a = 0.1 , __a = 0.1 , __a = 5_12 , __a = 0.02 , __a = True , __a = True , __a = 0 , __a = 2 , __a = 32 , __a = 1_28 , __a = False , __a = 0.0 , __a = True , __a = 0 , __a = 1 , __a = 2 , **__a , ) -> List[Any]:
        """Store model hyper-parameters and forward the shared ones to the base config."""
        _UpperCamelCase = vocab_size
        _UpperCamelCase = hidden_size
        _UpperCamelCase = encoder_ffn_dim
        _UpperCamelCase = num_encoder_layers
        _UpperCamelCase = num_encoder_attention_heads
        _UpperCamelCase = decoder_ffn_dim
        _UpperCamelCase = num_decoder_layers
        _UpperCamelCase = num_decoder_attention_heads
        _UpperCamelCase = max_position_embeddings
        _UpperCamelCase = init_std  # Normal(0, this parameter)
        _UpperCamelCase = activation_function
        # parameters for xlmprophetnet
        _UpperCamelCase = ngram
        _UpperCamelCase = num_buckets
        _UpperCamelCase = relative_max_distance
        _UpperCamelCase = disable_ngram_loss
        _UpperCamelCase = eps
        # 3 Types of Dropout
        _UpperCamelCase = attention_dropout
        _UpperCamelCase = activation_dropout
        _UpperCamelCase = dropout
        _UpperCamelCase = use_cache
        super().__init__(
            pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , is_encoder_decoder=__a , add_cross_attention=__a , decoder_start_token_id=__a , **__a , )

    @property
    def UpperCAmelCase ( self) -> int:
        # Total transformer depth: encoder plus decoder layers.
        return self.num_encoder_layers + self.num_decoder_layers

    # NOTE(review): this decorator references `num_hidden_layers`, which is not
    # defined as a property under that name above (the property was renamed) —
    # as written this raises a NameError at class-creation time.
    @num_hidden_layers.setter
    def UpperCAmelCase ( self , __a) -> str:
        """Reject direct assignment; encoder/decoder depths must be set separately."""
        raise NotImplementedError(
            '''This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'''
            ''' `num_decoder_layers`.''')
| 19
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
# Module-level logger for this image-processor file.
logger = logging.get_logger(__name__)
class EfficientFormerImageProcessor(BaseImageProcessor):
    r"""
    Image processor that turns raw images into model-ready ``pixel_values``.

    Pipeline (each step toggleable): resize -> center-crop -> rescale ->
    normalize -> channel-format conversion, returned as a `BatchFeature`.
    Defaults: resize to 224x224 (bicubic), center-crop to 224x224, rescale by
    1/255, and normalize with the ImageNet default mean/std.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 2_55,
        crop_size: Optional[Dict[str, int]] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 2_24, "width": 2_24}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Resize an image. `size` may give either an explicit ``height``/``width``
        or a ``shortest_edge`` (aspect ratio preserved).
        """
        size = get_size_dict(size)
        if "shortest_edge" in size:
            # NOTE(review): assumes non-square output when only the shortest
            # edge is given — confirm `default_to_square` against callers.
            output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''')
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop an image to ``size["height"]`` x ``size["width"]``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''')
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Multiply pixel values by `scale` (e.g. 1/255 to map uint8 to [0, 1])."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize an image channel-wise: ``(image - mean) / std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        do_rescale: Optional[bool] = None,
        do_normalize: Optional[bool] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        rescale_factor: Optional[float] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """
        Run the full preprocessing pipeline on one image or a batch.

        Per-call arguments override the instance defaults; `return_tensors`
        selects the tensor framework of the returned ``pixel_values``.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''')
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''')
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''')
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 19
| 1
|
'''simple docstring'''
import re
def indian_phone_validator(phone: str) -> bool:
    """
    Return ``True`` if *phone* is a valid Indian mobile number.

    Accepted forms: optional "+91" prefix (optionally followed by a hyphen or
    space), an optional leading "0" and/or "91", then a 10-digit number
    starting with 7, 8 or 9.
    """
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    # `re.search` must run the pattern against the *phone string* (the
    # original searched the pattern against itself). The anchored pattern
    # matched the whole input, so `match.string == phone` is always true on a
    # successful match.
    if match := re.search(pat, phone):
        return match.string == phone
    return False


if __name__ == "__main__":
    print(indian_phone_validator('+918827897895'))
| 273
|
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    """
    Helper that builds tiny ViT configs/inputs and runs shape checks for the
    unit tests below (instantiated as ``ViTModelTester(self)`` in ``setUp``).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Build random pixel values (and labels when enabled) plus a config."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Return a tiny ViTConfig matching the tester's hyper-parameters."""
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Bare model: output must be (batch, seq, hidden)."""
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        """MIM head: reconstruction must have the input image shape."""
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Classification head: logits must be (batch, num_labels)."""
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the shared ModelTesterMixin interface."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Standard model tests for ViT. Here we also check that we can load/run the
    classification, masked-image-modeling and bare variants.
    """

    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='ViT does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # Loading the first hub checkpoint is enough to validate the weights wiring.
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration tests."""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    """Slow integration tests that run real checkpoints on a fixture image."""

    @cached_property
    def default_image_processor(self):
        # None when PIL/vision deps are missing; tests guard via @require_vision.
        return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224').to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 10_00))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.2_744, 0.8_215, -0.0_836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions (here 480x480 instead of 224x224).
        model = ViTModel.from_pretrained('facebook/dino-vits8').to(torch_device)

        image_processor = ViTImageProcessor.from_pretrained('facebook/dino-vits8', size=4_80)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 36_01, 3_84))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[4.2_340, 4.3_906, -6.6_692], [4.5_463, 1.8_928, -6.7_257], [4.4_429, 0.8_496, -5.8_585]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        r"""
        A small test to make sure that inference works in half precision without any problem.
        """
        model = ViTModel.from_pretrained('facebook/dino-vits8', torch_dtype=torch.floataa, device_map='auto')
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
| 273
| 1
|
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Fast-tokenizer tests for BLOOM (there is no slow tokenizer)."""

    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = 'tokenizer_file'
    special_tokens_map = {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'}

    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("""bigscience/tokenizer""")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def test_encode_decode(self):
        """Round-trip a pair of sentences through known-good token ids."""
        tokenizer = self.get_rust_tokenizer()

        input_sentences = ["""The quick brown fox</s>""", """jumps over the lazy dog</s>"""]
        expected_input_ids = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]]
        computed_ids = tokenizer.batch_encode_plus(input_sentences)["""input_ids"""]
        self.assertListEqual(computed_ids, expected_input_ids)

        decoded_sentences = tokenizer.batch_decode(computed_ids)
        self.assertListEqual(decoded_sentences, input_sentences)

    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = """This is a simple input"""
                s2 = ["""This is a simple input 1""", """This is a simple input 2"""]
                p = ("""This is a simple input""", """This is a pair""")
                p2 = [
                    ("""This is a simple input 1""", """This is a simple input 2"""),
                    ("""This is a simple pair 1""", """This is a simple pair 2"""),
                ]

                # Simple input tests: truncating without padding must not raise.
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)

                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("""Bloom Tokenizer should be able to deal with padding""")

                # With no pad token, padding to max_length must raise ValueError.
                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="""max_length""")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="""max_length""")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="""max_length""",)

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="""max_length""")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="""max_length""")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="""max_length""",)

    def test_encodings_from_xnli_dataset(self):
        """Tokenizing multilingual XNLI text must round-trip exactly."""
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("""xnli""", """all_languages""", split="""test""", streaming=True)

        sample_data = next(iter(ds))["""premise"""]  # pick up one data
        input_text = list(sample_data.values())

        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)

    def test_pretrained_tokenizer_schema_checking(self):
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
| 26
|
'''simple docstring'''
import argparse
import os
import re
import packaging.version
# Root of the examples tree walked when bumping versions in example scripts.
PATH_TO_EXAMPLES = "examples/"
# For each file kind: (regex locating the current version string, replacement
# template in which "VERSION" is substituted with the new version).
REPLACE_PATTERNS = {
    "examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
    "init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
    "setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
    "doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
# Files whose version string is rewritten on every release.
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Rewrite the version string in `fname` using the REPLACE_PATTERNS entry `pattern`."""
    with open(fname, """r""", encoding="""utf-8""", newline="""\n""") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("""VERSION""", version)
    code = re_pattern.sub(replace, code)
    with open(fname, """w""", encoding="""utf-8""", newline="""\n""") as f:
        f.write(code)
def update_version_in_examples(version):
    """Update the `check_min_version(...)` call in every maintained example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("""research_projects""")
        if "legacy" in directories:
            directories.remove("""legacy""")
        for fname in fnames:
            if fname.endswith(""".py"""):
                update_version_in_file(os.path.join(folder, fname), version, pattern="""examples""")
def global_version_update(version, patch=False):
    """Update the version everywhere; patch releases skip the examples tree."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """In the README model list, point doc links at the stable docs instead of `main`."""
    # If the introduction or the conclusion of the list change, the prompts may need to be updated.
    _start_prompt = """🤗 Transformers currently provides the following architectures"""
    _end_prompt = """1. Want to contribute a new model?"""
    with open(README_FILE, """r""", encoding="""utf-8""", newline="""\n""") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("""1."""):
            lines[index] = lines[index].replace(
                """https://huggingface.co/docs/transformers/main/model_doc""",
                """https://huggingface.co/docs/transformers/model_doc""",
            )
        index += 1

    with open(README_FILE, """w""", encoding="""utf-8""", newline="""\n""") as f:
        f.writelines(lines)
def get_version():
    """Read the current version out of the package `__init__` and parse it."""
    with open(REPLACE_FILES["""init"""], """r""") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["""init"""][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do the release prep: pick the release version, apply it, clean the README."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
    else:
        default_version = F'''{default_version.major}.{default_version.minor + 1}.0'''

    # Now let's ask nicely if that's the right one.
    version = input(F'''Which version are you releasing? [{default_version}]''')
    if len(version) == 0:
        version = default_version

    print(F'''Updating version to {version}.''')
    global_version_update(version, patch=patch)
    if not patch:
        print("""Cleaning main README, don't forget to run `make fix-copies`.""")
        clean_main_ref_in_model_list()
def post_release_work():
    """After a release: bump the main branch back to a `.dev0` version."""
    current_version = get_version()
    dev_version = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(F'''Which version are we developing now? [{dev_version}]''')
    if len(version) == 0:
        version = dev_version

    print(F'''Updating version to {version}.''')
    global_version_update(version)
    print("""Cleaning main README, don't forget to run `make fix-copies`.""")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    # The parse result must be bound to `args` — it is read just below.
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
| 26
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: submodule name -> list of public symbols. Heavy
# dependencies (torch) are only imported when a symbol is actually accessed.
_import_structure = {
    'configuration_clap': [
        'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ClapAudioConfig',
        'ClapConfig',
        'ClapTextConfig',
    ],
    'processing_clap': ['ClapProcessor'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Without torch the modeling/feature-extraction modules stay unregistered.
    pass
else:
    _import_structure['modeling_clap'] = [
        'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ClapModel',
        'ClapPreTrainedModel',
        'ClapTextModel',
        'ClapTextModelWithProjection',
        'ClapAudioModel',
        'ClapAudioModelWithProjection',
    ]
    _import_structure['feature_extraction_clap'] = ['ClapFeatureExtractor']

if TYPE_CHECKING:
    # Direct imports so static type checkers see the real symbols.
    from .configuration_clap import (
        CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
        ClapAudioConfig,
        ClapConfig,
        ClapTextConfig,
    )
    from .processing_clap import ClapProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clap import ClapFeatureExtractor
        from .modeling_clap import (
            CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ClapAudioModel,
            ClapAudioModelWithProjection,
            ClapModel,
            ClapPreTrainedModel,
            ClapTextModel,
            ClapTextModelWithProjection,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 267
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( lowercase__ , unittest.TestCase ):
lowercase : List[str] = KandinskyVaaPriorPipeline
lowercase : Optional[Any] = ['prompt']
lowercase : Dict = ['prompt', 'negative_prompt']
lowercase : Dict = [
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
lowercase : int = False
@property
def a__ ( self :int ):
return 3_2
@property
def a__ ( self :Any ):
return 3_2
@property
def a__ ( self :Union[str, Any] ):
return self.time_input_dim
@property
def a__ ( self :int ):
return self.time_input_dim * 4
@property
def a__ ( self :Dict ):
return 1_0_0
@property
def a__ ( self :List[Any] ):
snake_case_ : List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def a__ ( self :int ):
torch.manual_seed(0 )
snake_case_ : Optional[Any] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=3_7 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,)
return CLIPTextModelWithProjection(_UpperCamelCase )
@property
def a__ ( self :Dict ):
torch.manual_seed(0 )
snake_case_ : Tuple = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 1_2,
"""embedding_dim""": self.text_embedder_hidden_size,
"""num_layers""": 1,
}
snake_case_ : int = PriorTransformer(**_UpperCamelCase )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
snake_case_ : Any = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def a__ ( self :Optional[int] ):
torch.manual_seed(0 )
snake_case_ : Any = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size ,image_size=2_2_4 ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=3_7 ,num_attention_heads=4 ,num_channels=3 ,num_hidden_layers=5 ,patch_size=1_4 ,)
snake_case_ : Optional[Any] = CLIPVisionModelWithProjection(_UpperCamelCase )
return model
@property
def a__ ( self :List[Any] ):
snake_case_ : Any = CLIPImageProcessor(
crop_size=2_2_4 ,do_center_crop=_UpperCamelCase ,do_normalize=_UpperCamelCase ,do_resize=_UpperCamelCase ,image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] ,image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] ,resample=3 ,size=2_2_4 ,)
return image_processor
def a__ ( self :List[Any] ):
snake_case_ : Tuple = self.dummy_prior
snake_case_ : Any = self.dummy_image_encoder
snake_case_ : Optional[int] = self.dummy_text_encoder
snake_case_ : Any = self.dummy_tokenizer
snake_case_ : Union[str, Any] = self.dummy_image_processor
snake_case_ : Tuple = UnCLIPScheduler(
variance_type="""fixed_small_log""" ,prediction_type="""sample""" ,num_train_timesteps=1_0_0_0 ,clip_sample=_UpperCamelCase ,clip_sample_range=10.0 ,)
snake_case_ : Tuple = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""scheduler""": scheduler,
"""image_processor""": image_processor,
}
return components
def a__ ( self :Dict ,_UpperCamelCase :str ,_UpperCamelCase :Union[str, Any]=0 ):
if str(_UpperCamelCase ).startswith("""mps""" ):
snake_case_ : Optional[int] = torch.manual_seed(_UpperCamelCase )
else:
snake_case_ : int = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
snake_case_ : Dict = {
"""prompt""": """horse""",
"""generator""": generator,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def a__ ( self :int ):
snake_case_ : Union[str, Any] = """cpu"""
snake_case_ : int = self.get_dummy_components()
snake_case_ : int = self.pipeline_class(**_UpperCamelCase )
snake_case_ : Union[str, Any] = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ : Any = pipe(**self.get_dummy_inputs(_UpperCamelCase ) )
snake_case_ : List[Any] = output.image_embeds
snake_case_ : List[Any] = pipe(
**self.get_dummy_inputs(_UpperCamelCase ) ,return_dict=_UpperCamelCase ,)[0]
snake_case_ : Dict = image[0, -1_0:]
snake_case_ : Optional[Any] = image_from_tuple[0, -1_0:]
assert image.shape == (1, 3_2)
snake_case_ : Optional[int] = np.array(
[-0.05_32, 1.71_20, 0.36_56, -1.08_52, -0.89_46, -1.17_56, 0.43_48, 0.24_82, 0.51_46, -0.11_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def a__ ( self :List[Any] ):
snake_case_ : Any = torch_device == """cpu"""
snake_case_ : Any = True
snake_case_ : Dict = False
self._test_inference_batch_single_identical(
test_max_difference=_UpperCamelCase ,relax_max_difference=_UpperCamelCase ,test_mean_pixel_difference=_UpperCamelCase ,)
@skip_mps
def a__ ( self :str ):
snake_case_ : str = torch_device == """cpu"""
snake_case_ : List[str] = False
self._test_attention_slicing_forward_pass(
test_max_difference=_UpperCamelCase ,test_mean_pixel_difference=_UpperCamelCase ,)
| 267
| 1
|
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class a_ ( __a , unittest.TestCase ):
    """Test suite for the RoCBert tokenizer (a BERT variant that also encodes word
    shape and pronunciation ids).

    NOTE(review): this class has been mechanically obfuscated. Every test method
    shares the name ``lowerCAmelCase`` — in a class body later defs silently
    replace earlier ones, so only the last method would survive — and most
    locals are assigned to placeholder names (``snake_case``, ``_A``) that the
    following statements never read (NameError at runtime). The original
    identifiers must be restored before this suite can run; the comments below
    describe the evident intent of each method.
    """

    # NOTE(review): these look like the TokenizerTesterMixin knobs
    # (tokenizer_class, rust_tokenizer_class, test_rust_tokenizer, ...) — confirm.
    A__ : Tuple = RoCBertTokenizer
    A__ : Any = None
    A__ : Union[str, Any] = False
    A__ : Optional[int] = True
    A__ : int = filter_non_english

    def lowerCAmelCase( self : Dict ):
        """Write a tiny vocab plus word-shape / word-pronunciation JSON maps to tmpdir."""
        super().setUp()
        snake_case : Tuple = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """你""", """好""", """是""", """谁""", """a""", """b""", """c""", """d"""]
        snake_case : List[Any] = {}
        snake_case : List[Any] = {}
        # Each token maps to its own index in both the shape and pronunciation maps.
        for i, value in enumerate(_A ):
            snake_case : int = i
            snake_case : Optional[int] = i
        snake_case : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        snake_case : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] )
        snake_case : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer:
            json.dump(_A , _A , ensure_ascii=_A )
        with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer:
            json.dump(_A , _A , ensure_ascii=_A )

    def lowerCAmelCase( self : Dict ):
        """Tokenize a mixed Chinese sentence and check token/shape/pronunciation ids."""
        snake_case : str = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        snake_case : int = tokenizer.tokenize('''你好[SEP]你是谁''' )
        self.assertListEqual(_A , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [5, 6, 2, 5, 7, 8] )
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(_A ) , [5, 6, 2, 5, 7, 8] )
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(_A ) , [5, 6, 2, 5, 7, 8] )

    def lowerCAmelCase( self : Optional[int] ):
        """Basic tokenizer splits CJK characters into individual tokens."""
        snake_case : int = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )

    def lowerCAmelCase( self : Tuple ):
        """Lower-casing basic tokenizer keeps accents by default."""
        snake_case : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=_A )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how  \n Are yoU?  ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )

    def lowerCAmelCase( self : Union[str, Any] ):
        """Lower-casing with strip_accents disabled preserves diacritics."""
        snake_case : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=_A , strip_accents=_A )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )

    def lowerCAmelCase( self : Tuple ):
        """Lower-casing with strip_accents enabled removes diacritics."""
        snake_case : Tuple = RoCBertBasicTokenizer(do_lower_case=_A , strip_accents=_A )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )

    def lowerCAmelCase( self : int ):
        """Lower-casing default behaviour also strips accents."""
        snake_case : Any = RoCBertBasicTokenizer(do_lower_case=_A )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )

    def lowerCAmelCase( self : int ):
        """Case-preserving basic tokenizer leaves casing untouched."""
        snake_case : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=_A )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how  \n Are yoU?  ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )

    def lowerCAmelCase( self : int ):
        """Case-preserving tokenizer with strip_accents disabled keeps diacritics."""
        snake_case : Any = RoCBertBasicTokenizer(do_lower_case=_A , strip_accents=_A )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )

    def lowerCAmelCase( self : Optional[int] ):
        """Case-preserving tokenizer with strip_accents enabled removes diacritics."""
        snake_case : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=_A , strip_accents=_A )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )

    def lowerCAmelCase( self : Union[str, Any] ):
        """Tokens listed in never_split are kept intact."""
        snake_case : str = RoCBertBasicTokenizer(do_lower_case=_A , never_split=['''[UNK]'''] )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how  \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )

    def lowerCAmelCase( self : str ):
        """WordPiece tokenizer splits into subwords and falls back to [UNK]."""
        snake_case : Union[str, Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
        snake_case : Any = {}
        for i, token in enumerate(_A ):
            snake_case : int = i
        snake_case : Union[str, Any] = RoCBertWordpieceTokenizer(vocab=_A , unk_token='''[UNK]''' )
        self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
        self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )

    def lowerCAmelCase( self : Any ):
        """_is_whitespace classifies spaces, tabs, newlines and NBSP only."""
        self.assertTrue(_is_whitespace(''' ''' ) )
        self.assertTrue(_is_whitespace('''\t''' ) )
        self.assertTrue(_is_whitespace('''\r''' ) )
        self.assertTrue(_is_whitespace('''\n''' ) )
        self.assertTrue(_is_whitespace('''\u00A0''' ) )
        self.assertFalse(_is_whitespace('''A''' ) )
        self.assertFalse(_is_whitespace('''-''' ) )

    def lowerCAmelCase( self : Optional[int] ):
        """_is_control classifies control characters but not whitespace/letters."""
        self.assertTrue(_is_control('''\u0005''' ) )
        self.assertFalse(_is_control('''A''' ) )
        self.assertFalse(_is_control(''' ''' ) )
        self.assertFalse(_is_control('''\t''' ) )
        self.assertFalse(_is_control('''\r''' ) )

    def lowerCAmelCase( self : Union[str, Any] ):
        """_is_punctuation classifies ASCII punctuation characters."""
        self.assertTrue(_is_punctuation('''-''' ) )
        self.assertTrue(_is_punctuation('''$''' ) )
        self.assertTrue(_is_punctuation('''`''' ) )
        self.assertTrue(_is_punctuation('''.''' ) )
        self.assertFalse(_is_punctuation('''A''' ) )
        self.assertFalse(_is_punctuation(''' ''' ) )

    def lowerCAmelCase( self : List[Any] ):
        """Soft-hyphen-only input tokenizes to an empty list (tokenizers#340)."""
        snake_case : Tuple = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(_A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
        if self.test_rust_tokenizer:
            snake_case : Optional[int] = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(_A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )

    def lowerCAmelCase( self : str ):
        """Offset mapping lines up with tokens, with and without lower-casing."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                snake_case : List[Any] = self.rust_tokenizer_class.from_pretrained(_A , **_A )
                snake_case : Any = F"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                snake_case : Any = tokenizer_r.encode_plus(
                    _A , return_attention_mask=_A , return_token_type_ids=_A , return_offsets_mapping=_A , add_special_tokens=_A , )
                snake_case : Dict = tokenizer_r.do_lower_case if hasattr(_A , '''do_lower_case''' ) else False
                snake_case : Any = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), """A"""),
                        ((1, 2), ""","""),
                        ((3, 5), """na"""),
                        ((5, 6), """##ï"""),
                        ((6, 8), """##ve"""),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), """Allen"""),
                        ((21, 23), """##NL"""),
                        ((23, 24), """##P"""),
                        ((25, 33), """sentence"""),
                        ((33, 34), """."""),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), """a"""),
                        ((1, 2), ""","""),
                        ((3, 8), """naive"""),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), """allen"""),
                        ((21, 23), """##nl"""),
                        ((23, 24), """##p"""),
                        ((25, 33), """sentence"""),
                        ((33, 34), """."""),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )
                self.assertEqual(
                    [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
                self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )

    def lowerCAmelCase( self : Dict ):
        """Chinese characters are never prefixed with '##' unless WordPiece is on."""
        snake_case : Optional[int] = ["""的""", """人""", """有"""]
        snake_case : List[str] = """""".join(_A )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                snake_case : List[str] = True
                snake_case : Union[str, Any] = self.tokenizer_class.from_pretrained(_A , **_A )
                snake_case : Any = self.rust_tokenizer_class.from_pretrained(_A , **_A )
                snake_case : Tuple = tokenizer_p.encode(_A , add_special_tokens=_A )
                snake_case : str = tokenizer_r.encode(_A , add_special_tokens=_A )
                snake_case : Tuple = tokenizer_r.convert_ids_to_tokens(_A )
                snake_case : Tuple = tokenizer_p.convert_ids_to_tokens(_A )
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(_A , _A )
                self.assertListEqual(_A , _A )
                snake_case : Union[str, Any] = False
                snake_case : List[Any] = self.rust_tokenizer_class.from_pretrained(_A , **_A )
                snake_case : Union[str, Any] = self.tokenizer_class.from_pretrained(_A , **_A )
                snake_case : Union[str, Any] = tokenizer_r.encode(_A , add_special_tokens=_A )
                snake_case : Tuple = tokenizer_p.encode(_A , add_special_tokens=_A )
                snake_case : Optional[int] = tokenizer_r.convert_ids_to_tokens(_A )
                snake_case : Optional[int] = tokenizer_p.convert_ids_to_tokens(_A )
                # it is expected that only the first Chinese character is not preceded by "##".
                snake_case : Tuple = [
                    F"##{token}" if idx != 0 else token for idx, token in enumerate(_A )
                ]
                self.assertListEqual(_A , _A )
                self.assertListEqual(_A , _A )

    @slow
    def lowerCAmelCase( self : Any ):
        """build_inputs_with_special_tokens wraps sequences with [CLS]/[SEP] ids."""
        snake_case : Any = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        snake_case : List[str] = tokenizer.encode('''你好''' , add_special_tokens=_A )
        snake_case : int = tokenizer.encode('''你是谁''' , add_special_tokens=_A )
        snake_case : Tuple = tokenizer.build_inputs_with_special_tokens(_A )
        snake_case : str = tokenizer.build_inputs_with_special_tokens(_A , _A )
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_a + [2]

    def lowerCAmelCase( self : Any ):
        """prepare_for_model over token/shape/pronunciation ids equals encode_plus."""
        snake_case : int = self.get_tokenizers(do_lower_case=_A )
        for tokenizer in tokenizers:
            with self.subTest(F"{tokenizer.__class__.__name__}" ):
                snake_case : int = """你好,你是谁"""
                snake_case : List[str] = tokenizer.tokenize(_A )
                snake_case : Dict = tokenizer.convert_tokens_to_ids(_A )
                snake_case : Optional[Any] = tokenizer.convert_tokens_to_shape_ids(_A )
                snake_case : Any = tokenizer.convert_tokens_to_pronunciation_ids(_A )
                snake_case : Dict = tokenizer.prepare_for_model(
                    _A , _A , _A , add_special_tokens=_A )
                snake_case : Optional[int] = tokenizer.encode_plus(_A , add_special_tokens=_A )
                self.assertEqual(_A , _A )
| 598
|
"""simple docstring"""
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    """Query the GitHub Actions API for self-hosted runner status and raise if any
    runner in ``target_runners`` is offline.

    Fixes: both parameters were named ``_lowerCAmelCase`` (SyntaxError: duplicate
    argument) and every local read (``token``, ``offline_runners``, ``status``,
    ``failed``) referenced an undefined name. The function is also renamed to
    match its call site at the bottom of the file.

    Args:
        target_runners: runner names to check.
        token: GitHub token with ``actions:read`` permission.

    Raises:
        ValueError: if at least one targeted runner reports status "offline".
    """
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        """ https://api.github.com/repos/huggingface/transformers/actions/runners"""
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("""utf-8""")
    status = json.loads(o)
    runners = status["""runners"""]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)
    # save the result so we can report them on Slack
    with open("""offline_runners.txt""", """w""") as fp:
        fp.write(json.dumps(offline_runners))
    if len(offline_runners) > 0:
        failed = """\n""".join([x["""name"""] for x in offline_runners])
        raise ValueError(f'''The following runners are offline:\n{failed}''')
if __name__ == "__main__":

    def list_str(values):
        """Parse a comma-separated CLI value into a list of strings."""
        return values.split(""",""")

    # Fixes: the helper was named `UpperCamelCase` while argparse referenced
    # `list_str`; the parser and parsed args were bound to throwaway locals
    # while `parser.add_argument` / `args.*` read undefined names.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--target_runners''',
        default=None,
        type=list_str,
        required=True,
        help='''Comma-separated list of runners to check status.''',
    )
    parser.add_argument(
        '''--token''', default=None, type=str, required=True, help='''A token that has actions:read permission.'''
    )
    args = parser.parse_args()
    get_runner_status(args.target_runners, args.token)
| 238
| 0
|
"""simple docstring"""
import os
import sys
import unittest
# Fixes: the repo root was bound to a throwaway name while `sys.path.append`
# read the undefined `git_repo_path`, and both test-file paths clobbered the
# same variable `a`, losing the BERT path entirely.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))


import get_test_info  # noqa: E402
from get_test_info import (  # noqa: E402
    get_model_to_test_mapping,
    get_model_to_tester_mapping,
    get_test_to_tester_mapping,
)


# Model test files whose structure the introspection helpers are checked against.
bert_test_file = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
blip_test_file = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class _UpperCamelCase ( unittest.TestCase ):
    """Sanity checks for the model/test/tester introspection helpers in
    ``utils/get_test_info.py``.

    Fixes: all three test methods were named ``A__`` — later defs silently
    replaced earlier ones, so only one test would have run — and every helper
    was called with the undefined name ``__UpperCamelCase``. Canonical
    ``test_*`` names are restored so unittest discovers all three, and the test
    file paths are computed locally so each method is self-contained.
    """

    def test_get_test_to_tester_mapping(self):
        """Each *ModelTest class maps to its *ModelTester companion."""
        bert_test_file = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""")
        blip_test_file = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""")
        bert_test_tester_mapping = get_test_to_tester_mapping(bert_test_file)
        blip_test_tester_mapping = get_test_to_tester_mapping(blip_test_file)
        expected_bert_mapping = {"""BertModelTest""": """BertModelTester"""}
        expected_blip_mapping = {
            """BlipModelTest""": """BlipModelTester""",
            """BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
            """BlipTextModelTest""": """BlipTextModelTester""",
            """BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
            """BlipVQAModelTest""": """BlipVQAModelTester""",
            """BlipVisionModelTest""": """BlipVisionModelTester""",
        }
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping) , expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping) , expected_blip_mapping)

    def test_get_model_to_test_mapping(self):
        """Each model class maps to the test classes that exercise it."""
        bert_test_file = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""")
        blip_test_file = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""")
        bert_model_test_mapping = get_model_to_test_mapping(bert_test_file)
        blip_model_test_mapping = get_model_to_test_mapping(blip_test_file)
        expected_bert_mapping = {
            """BertForMaskedLM""": ["""BertModelTest"""],
            """BertForMultipleChoice""": ["""BertModelTest"""],
            """BertForNextSentencePrediction""": ["""BertModelTest"""],
            """BertForPreTraining""": ["""BertModelTest"""],
            """BertForQuestionAnswering""": ["""BertModelTest"""],
            """BertForSequenceClassification""": ["""BertModelTest"""],
            """BertForTokenClassification""": ["""BertModelTest"""],
            """BertLMHeadModel""": ["""BertModelTest"""],
            """BertModel""": ["""BertModelTest"""],
        }
        expected_blip_mapping = {
            """BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
            """BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
            """BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
            """BlipModel""": ["""BlipModelTest"""],
            """BlipTextModel""": ["""BlipTextModelTest"""],
            """BlipVisionModel""": ["""BlipVisionModelTest"""],
        }
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping) , expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping) , expected_blip_mapping)

    def test_get_model_to_tester_mapping(self):
        """Each model class maps to the tester classes that configure it."""
        bert_test_file = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""")
        blip_test_file = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""")
        bert_model_tester_mapping = get_model_to_tester_mapping(bert_test_file)
        blip_model_tester_mapping = get_model_to_tester_mapping(blip_test_file)
        expected_bert_mapping = {
            """BertForMaskedLM""": ["""BertModelTester"""],
            """BertForMultipleChoice""": ["""BertModelTester"""],
            """BertForNextSentencePrediction""": ["""BertModelTester"""],
            """BertForPreTraining""": ["""BertModelTester"""],
            """BertForQuestionAnswering""": ["""BertModelTester"""],
            """BertForSequenceClassification""": ["""BertModelTester"""],
            """BertForTokenClassification""": ["""BertModelTester"""],
            """BertLMHeadModel""": ["""BertModelTester"""],
            """BertModel""": ["""BertModelTester"""],
        }
        expected_blip_mapping = {
            """BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
            """BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
            """BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
            """BlipModel""": ["""BlipModelTester"""],
            """BlipTextModel""": ["""BlipTextModelTester"""],
            """BlipVisionModel""": ["""BlipVisionModelTester"""],
        }
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping) , expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping) , expected_blip_mapping)
| 702
|
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def snake_case__ ( _SCREAMING_SNAKE_CASE ) -> int:  # picklable for multiprocessing
    """Return its integer argument plus one.

    Fixes: the body returned the undefined name ``i`` instead of the parameter,
    and the return annotation claimed ``Dict`` for an int result.
    """
    return _SCREAMING_SNAKE_CASE + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def snake_case__ ( ) -> None:
    """Check that the joblib-spark backend registers under ``parallel_backend`` and
    that unsupported backend names raise ``ValueError``.

    Fixes: ``pytest.raises`` and ``map_nested`` were called with the undefined
    placeholder name ``_SCREAMING_SNAKE_CASE``.
    """
    with parallel_backend("""spark""" ):
        assert ParallelBackendConfig.backend_name == "spark"

    def add_one(i):
        # NOTE(review): the original module defined this helper at top level so
        # it stays picklable for multiprocessing — here only the error path is
        # exercised, so a local is sufficient.
        return i + 1

    lst = [1, 2, 3]
    # An unknown backend name must raise before any work is dispatched.
    with pytest.raises(ValueError ):
        with parallel_backend("""unsupported backend""" ):
            map_nested(add_one , lst , num_proc=2 )
    with pytest.raises(ValueError ):
        with parallel_backend("""unsupported backend""" ):
            map_nested(add_one , lst , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("""num_proc""" , [2, -1] )
def snake_case__ ( num_proc ) -> None:
    """``map_nested`` over the spark backend matches sequential mapping for lists
    and (nested) dicts.

    Fixes: the parameter was named ``_SCREAMING_SNAKE_CASE``, which does not
    match ``parametrize("num_proc", ...)`` (pytest errors out), and every
    fixture/expected value clobbered the same local while the asserts read
    undefined names.
    """

    def add_one(i):
        # NOTE(review): originally a picklable top-level helper — confirm
        # picklability before running this for real under spark workers.
        return i + 1

    sa = [1, 2]
    sb = {"""a""": 1, """b""": 2}
    sc = {"""a""": [1, 2], """b""": [3, 4]}
    sd = {"""a""": {"""1""": 1}, """b""": 2}
    se = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
    expected_map_nested_sa = [2, 3]
    expected_map_nested_sb = {"""a""": 2, """b""": 3}
    expected_map_nested_sc = {"""a""": [2, 3], """b""": [4, 5]}
    expected_map_nested_sd = {"""a""": {"""1""": 2}, """b""": 3}
    expected_map_nested_se = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
    with parallel_backend("""spark""" ):
        assert map_nested(add_one , sa , num_proc=num_proc ) == expected_map_nested_sa
        assert map_nested(add_one , sb , num_proc=num_proc ) == expected_map_nested_sb
        assert map_nested(add_one , sc , num_proc=num_proc ) == expected_map_nested_sc
        assert map_nested(add_one , sd , num_proc=num_proc ) == expected_map_nested_sd
        assert map_nested(add_one , se , num_proc=num_proc ) == expected_map_nested_se
| 422
| 0
|
from statistics import mean
import numpy as np
def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    """Compute per-process turn-around times under Highest Response Ratio Next.

    Fixes: all four parameters were named ``__a`` (SyntaxError: duplicate
    argument) and the body read undefined placeholder names. The function is
    renamed to match its call site in ``__main__``.

    Note: ``arrival_time`` is sorted in place; ``burst_time`` and
    ``process_name`` are re-ordered locally to match.

    >>> calculate_turn_around_time(["A", "B", "C"], [3, 5, 8], [2, 4, 6], 3)
    [2, 4, 7]
    """
    current_time = 0
    # Number of processes finished so far.
    finished_process_count = 0
    # 0 = not yet executed, 1 = finished.
    finished_process = [0] * no_of_process
    # Result list of turn-around times.
    turn_around_time = [0] * no_of_process
    # Sort all three sequences by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()
    while no_of_process > finished_process_count:
        # First process (in arrival order) that has not finished yet.
        i = 0
        while finished_process[i] == 1:
            i += 1
        # CPU idles until the next process arrives.
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]
        # Highest response ratio seen so far and its process index.
        response_ratio = 0
        loc = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i
        # Run the selected process to completion and record its turn-around time.
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        finished_process[loc] = 1
        finished_process_count += 1
    return turn_around_time
def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    """Derive waiting times: waiting = turn-around - burst, per process.

    Fixes: all four parameters were named ``__a`` (SyntaxError: duplicate
    argument) and the loop read undefined names. Renamed to match its call site.
    """
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
    # Fixes: every constant and result was bound to the same throwaway local
    # while the calls and prints below read undefined names.
    no_of_process = 5
    process_name = ['A', 'B', 'C', 'D', 'E']
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
    print('''Process name \tArrival time \tBurst time \tTurn around time \tWaiting time''')
    for i in range(0, no_of_process):
        print(
            f'{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'
            f'{turn_around_time[i]}\t\t\t{waiting_time[i]}'
        )
    print(f'average waiting time : {mean(waiting_time):.5f}')
    print(f'average turn around time : {mean(turn_around_time):.5f}')
| 513
|
'''simple docstring'''
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """Greedy activity selection: print the indices of a maximum-size set of
    mutually compatible activities, comma-separated.

    ``finish`` must be sorted in ascending order.

    Fixes: both parameters were named ``__lowerCAmelCase`` placeholders in the
    body, the selected index was bound to a throwaway local, and the prints
    emitted the parameter instead of the indices. Renamed to match the call
    site in ``__main__``.
    """
    n = len(finish)
    print("""The following activities are selected:""")
    # The first activity (index 0) is always selected.
    i = 0
    print(i, end=""",""")
    # Consider the rest of the activities.
    for j in range(n):
        # An activity is compatible if it starts no earlier than the finish
        # time of the previously selected activity.
        if start[j] >= finish[i]:
            print(j, end=""",""")
            i = j
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Fixes: the two input lists clobbered throwaway names while the call
    # below read the undefined `start` / `finish`.
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
| 50
| 0
|
"""simple docstring"""
from queue import PriorityQueue
from typing import Any
import numpy as np
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> float | int:
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
lowerCAmelCase__ : Optional[Any] = cst_fwd.get(UpperCamelCase__ , np.inf )
lowerCAmelCase__ : List[str] = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
lowerCAmelCase__ : int = new_cost_f
lowerCAmelCase__ : List[Any] = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
lowerCAmelCase__ : Optional[int] = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def bidirectional_dij(
    source: str, destination: str, graph_forward: dict, graph_backward: dict
) -> int:
    """Bidirectional Dijkstra: shortest distance from ``source`` to
    ``destination``, or -1 if no path exists.

    Fixes: all four parameters were named ``__UpperCAmelCase`` (SyntaxError:
    duplicate argument), every local clobbered the same throwaway name, and the
    helper call used a mangled name; identifiers restored to match
    ``pass_and_relaxation``.
    """
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward = PriorityQueue()
    queue_backward = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)
        # Relax the forward frontier, then the backward frontier.
        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward,
            cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward,
            cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance,
        )
        # Frontiers met: no shorter crossing path can be found.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
# Fixes: both example graphs were assigned to the same name `_A`, so the
# forward graph was lost. Restored to distinct, descriptive constants.
graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
# Reverse-edge view of the same graph for the backward search.
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 703
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Fixes: the logger, vocab-file map, pretrained map, and size map were all
# bound to the single name `_A` (each overwriting the last), while the
# tokenizer class below reads the canonical constant names.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.model"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """google/rembert""": 256,
}
class _lowerCamelCase ( a_ ):
_lowerCamelCase :List[str] = VOCAB_FILES_NAMES
_lowerCamelCase :Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase :Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : List[Any] , UpperCamelCase : str , UpperCamelCase : List[Any]=False , UpperCamelCase : Optional[int]=True , UpperCamelCase : List[Any]=True , UpperCamelCase : Any="[CLS]" , UpperCamelCase : Tuple="[SEP]" , UpperCamelCase : Any="[UNK]" , UpperCamelCase : str="[SEP]" , UpperCamelCase : Dict="[PAD]" , UpperCamelCase : Any="[CLS]" , UpperCamelCase : Dict="[MASK]" , **UpperCamelCase : Dict , ) -> Tuple:
"""simple docstring"""
super().__init__(
do_lower_case=UpperCamelCase , remove_space=UpperCamelCase , keep_accents=UpperCamelCase , bos_token=UpperCamelCase , eos_token=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , **UpperCamelCase , )
lowerCAmelCase__ : List[Any] = do_lower_case
lowerCAmelCase__ : str = remove_space
lowerCAmelCase__ : Optional[int] = keep_accents
lowerCAmelCase__ : str = vocab_file
lowerCAmelCase__ : Optional[int] = spm.SentencePieceProcessor()
self.sp_model.Load(UpperCamelCase )
@property
def _lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
return len(self.sp_model )
def _lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : Any = {self.convert_ids_to_tokens(UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Any ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : int = self.__dict__.copy()
lowerCAmelCase__ : Union[str, Any] = None
return state
def __setstate__( self : Any , UpperCamelCase : int ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = d
lowerCAmelCase__ : int = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def _lowerCAmelCase ( self : Tuple , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[str]=False ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = self.sp_model.EncodeAsPieces(UpperCamelCase )
return pieces
def _lowerCAmelCase ( self : Optional[Any] , UpperCamelCase : Optional[int] ) -> Dict:
"""simple docstring"""
return self.sp_model.PieceToId(UpperCamelCase )
def _lowerCAmelCase ( self : Optional[Any] , UpperCamelCase : Optional[int] ) -> List[Any]:
"""simple docstring"""
return self.sp_model.IdToPiece(UpperCamelCase )
def _lowerCAmelCase ( self : Union[str, Any] , UpperCamelCase : str ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = self.sp_model.decode_pieces(UpperCamelCase )
return out_string
def _lowerCAmelCase ( self : Optional[int] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowerCAmelCase__ : Any = [self.sep_token_id]
lowerCAmelCase__ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _lowerCAmelCase ( self : Optional[int] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None , UpperCamelCase : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(UpperCamelCase )) + [1] + ([0] * len(UpperCamelCase )) + [1]
return [1] + ([0] * len(UpperCamelCase )) + [1]
def _lowerCAmelCase ( self : Optional[int] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
    """Create token-type ids: 0 for the first segment, 1 for the second.

    Bug fix: the original declared two parameters with the same name (a
    SyntaxError) and referenced the undefined names ``sep``/``cls`` after
    binding them to a throwaway local.
    """
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_b is None:
        return len(cls + token_ids_a + sep ) * [0]
    return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
def _lowerCAmelCase ( self : Any , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
    """Copy the SentencePiece vocab file into ``save_directory``.

    Bug fix: the original declared two parameters with the same name (a
    SyntaxError), referenced the undefined ``filename_prefix`` and returned
    the undefined name ``out_vocab_file`` after binding the joined path to a
    throwaway local.

    :param save_directory: target directory (logged + early-return if missing).
    :param filename_prefix: optional prefix for the saved file name.
    :return: one-tuple with the written vocab path, or None on bad directory.
    """
    if not os.path.isdir(save_directory ):
        logger.error("""Vocabulary path ({}) should be a directory""".format(save_directory ) )
        return
    out_vocab_file = os.path.join(
        save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
    # Only copy when source and destination differ.
    if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
        copyfile(self.vocab_file , out_vocab_file )
    return (out_vocab_file,)
| 507
| 0
|
import doctest
from collections import deque
import numpy as np
class a :
"""simple docstring"""
def __init__( self : List[str] ) -> Any:
__UpperCAmelCase : List[str] = [2, 1, 2, -1]
__UpperCAmelCase : int = [1, 2, 3, 4]
def UpperCAmelCase ( self : int ) -> Union[str, Any]:
__UpperCAmelCase : Union[str, Any] = len(self.first_signal )
__UpperCAmelCase : int = len(self.second_signal )
__UpperCAmelCase : Union[str, Any] = max(lowerCAmelCase__ , lowerCAmelCase__ )
# create a zero matrix of max_length x max_length
__UpperCAmelCase : str = [[0] * max_length for i in range(lowerCAmelCase__ )]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(lowerCAmelCase__ ):
__UpperCAmelCase : Any = deque(self.second_signal )
rotated_signal.rotate(lowerCAmelCase__ )
for j, item in enumerate(lowerCAmelCase__ ):
matrix[i][j] += item
# multiply the matrix with the first signal
__UpperCAmelCase : str = np.matmul(np.transpose(lowerCAmelCase__ ) , np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(lowerCAmelCase__ , 2 ) for i in final_signal]
if __name__ == "__main__":
    # Run any doctests in this module when executed as a script.
    doctest.testmod()
| 63
|
"""simple docstring"""
def _lowerCAmelCase ( UpperCamelCase_ = 100 ):
    """Project Euler 6: difference between the square of the sum and the sum
    of the squares of the first ``UpperCamelCase_`` natural numbers."""
    numbers = range(1 , UpperCamelCase_ + 1 )
    square_of_sum = sum(numbers ) ** 2
    sum_of_squares = sum(value * value for value in numbers )
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
    # Bug fix: this module defines `_lowerCAmelCase`, not `solution`, so the
    # original call raised NameError when run as a script.
    print(F"""{_lowerCAmelCase() = }""")
| 155
| 0
|
from __future__ import annotations
def __UpperCAmelCase ( __A ) -> List[str]:
'''simple docstring'''
return len(set(lowerCamelCase__ ) ) == len(lowerCamelCase__ )
if __name__ == "__main__":
    # Run any doctests in this module when executed as a script.
    import doctest

    doctest.testmod()
| 715
|
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def __UpperCAmelCase ( __A ) -> Union[str, Any]:
'''simple docstring'''
if (
(cp >= 0x4_E_0_0 and cp <= 0x9_F_F_F)
or (cp >= 0x3_4_0_0 and cp <= 0x4_D_B_F) #
or (cp >= 0x2_0_0_0_0 and cp <= 0x2_A_6_D_F) #
or (cp >= 0x2_A_7_0_0 and cp <= 0x2_B_7_3_F) #
or (cp >= 0x2_B_7_4_0 and cp <= 0x2_B_8_1_F) #
or (cp >= 0x2_B_8_2_0 and cp <= 0x2_C_E_A_F) #
or (cp >= 0xF_9_0_0 and cp <= 0xF_A_F_F)
or (cp >= 0x2_F_8_0_0 and cp <= 0x2_F_A_1_F) #
): #
return True
return False
def __UpperCAmelCase ( __A ) -> List[str]:
    '''Return 1 when every character of a word is a CJK character, else 0.

    NOTE(review): the loop iterates over ``word`` and calls
    ``_is_chinese_char``, but neither name is defined in this module (the
    parameter is ``__A`` and the sibling predicate was renamed) — presumably
    casualties of automated renaming; confirm against the original
    ``prepare_chinese_ref`` script before running.
    '''
    for char in word:
        # NOTE(review): the code point is bound to a throwaway local; the
        # (undefined) predicate is then called with the raw argument instead.
        UpperCAmelCase__ = ord(__A )
        if not _is_chinese_char(__A ):
            return 0
    return 1
def __UpperCAmelCase ( __A ) -> str:
    '''Collect the multi-character Chinese words appearing in a token list.

    NOTE(review): the intermediate set/list were bound to the throwaway
    local ``UpperCAmelCase__``, yet the body reads ``word_set``/``word_list``
    and iterates ``tokens`` — all undefined here, and ``is_chinese`` is not
    defined in this module either. This looks like renaming damage; verify
    against the original script.
    '''
    UpperCAmelCase__ = set()
    for token in tokens:
        UpperCAmelCase__ = len(__A ) > 1 and is_chinese(__A )
        if chinese_word:
            word_set.add(__A )
    UpperCAmelCase__ = list(__A )
    return word_list
def __UpperCAmelCase ( __A , __A ) -> Optional[Any]:
    '''Prefix BERT sub-tokens that continue a whole Chinese word with "##".

    Greedy longest-match over a set of known Chinese words: at each position
    it tries the longest candidate first and, on a match, rewrites the
    following sub-tokens with a "##" continuation prefix.

    NOTE(review): the signature declares two parameters that are both named
    ``__A`` — a SyntaxError — and the body reads ``chinese_word_set``,
    ``bert_tokens``, ``bert_word``, ``start``, ``end``, ``whole_word`` and
    ``single_word``, none of which are bound (the assignments all target the
    throwaway ``UpperCAmelCase__``). This block is corrupted by automated
    renaming and cannot run as written; restore from the original
    ``prepare_chinese_ref`` script.
    '''
    if not chinese_word_set:
        return bert_tokens
    UpperCAmelCase__ = max([len(__A ) for w in chinese_word_set] )
    UpperCAmelCase__ = bert_tokens
    UpperCAmelCase__ , UpperCAmelCase__ = 0, len(__A )
    while start < end:
        UpperCAmelCase__ = True
        if is_chinese(bert_word[start] ):
            UpperCAmelCase__ = min(end - start , __A )
            for i in range(__A , 1 , -1 ):
                UpperCAmelCase__ = "".join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1 , start + i ):
                        UpperCAmelCase__ = "##" + bert_word[j]
                    UpperCAmelCase__ = start + i
                    UpperCAmelCase__ = False
                    break
            if single_word:
                start += 1
    return bert_word
def __UpperCAmelCase ( __A , __A , __A ) -> List[Any]:
    '''Build whole-word-masking reference ids for Chinese text.

    Segments the input lines with LTP (in chunks of 100), tokenizes them with
    a BERT tokenizer, marks sub-tokens continuing a Chinese word with "##"
    and records the positions of those continuation sub-tokens per line.

    NOTE(review): three parameters share the name ``__A`` (a SyntaxError),
    and the body reads ``ltp_tokenizer``, ``lines``, ``res``, ``ltp_res``,
    ``bert_tokenizer``, ``bert_res``, ``input_tokens``, ``ref_id`` and
    ``ref_ids`` — all assigned to the throwaway ``UpperCAmelCase__`` instead.
    Corrupted by automated renaming; restore from the original script.
    '''
    UpperCAmelCase__ = []
    for i in range(0 , len(__A ) , 1_0_0 ):
        UpperCAmelCase__ = ltp_tokenizer.seg(lines[i : i + 1_0_0] )[0]
        UpperCAmelCase__ = [get_chinese_word(__A ) for r in res]
        ltp_res.extend(__A )
    assert len(__A ) == len(__A )
    UpperCAmelCase__ = []
    for i in range(0 , len(__A ) , 1_0_0 ):
        UpperCAmelCase__ = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=__A , truncation=__A , max_length=5_1_2 )
        bert_res.extend(res["input_ids"] )
    assert len(__A ) == len(__A )
    UpperCAmelCase__ = []
    for input_ids, chinese_word in zip(__A , __A ):
        UpperCAmelCase__ = []
        for id in input_ids:
            UpperCAmelCase__ = bert_tokenizer._convert_id_to_token(__A )
            input_tokens.append(__A )
        UpperCAmelCase__ = add_sub_symbol(__A , __A )
        UpperCAmelCase__ = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(__A ):
            if token[:2] == "##":
                UpperCAmelCase__ = token[2:]
                # save chinese tokens' pos
                if len(__A ) == 1 and _is_chinese_char(ord(__A ) ):
                    ref_id.append(__A )
        ref_ids.append(__A )
    assert len(__A ) == len(__A )
    return ref_ids
def __UpperCAmelCase ( __A ) -> Optional[int]:
    '''Script entry point: read input lines, build reference ids, write JSON lines.

    NOTE(review): the body reads ``args.*`` although the parameter is ``__A``
    and the parsed namespace is bound to the module-level ``A`` in the main
    guard, and it references ``data``, ``prepare_ref`` and ``ref_ids`` which
    are never bound here (assignments target the throwaway
    ``UpperCAmelCase__``). Corrupted by automated renaming.
    '''
    with open(args.file_name , "r" , encoding="utf-8" ) as f:
        UpperCAmelCase__ = f.readlines()
    UpperCAmelCase__ = [line.strip() for line in data if len(__A ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
    UpperCAmelCase__ = LTP(args.ltp ) # faster in GPU device
    UpperCAmelCase__ = BertTokenizer.from_pretrained(args.bert )
    UpperCAmelCase__ = prepare_ref(__A , __A , __A )
    with open(args.save_path , "w" , encoding="utf-8" ) as f:
        UpperCAmelCase__ = [json.dumps(__A ) + "\n" for ref in ref_ids]
        f.writelines(__A )
if __name__ == "__main__":
    # NOTE(review): the ArgumentParser is bound to ``A`` but configured and
    # consumed through the undefined names ``parser``/``args``, and ``main``
    # is not defined in this module (the entry point above was renamed).
    # This guard cannot run as written; restore names before use.
    A = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
    )
    parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
    parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
    A = parser.parse_args()
    main(args)
| 277
| 0
|
import numpy as np
def __lowercase ( _UpperCamelCase ) ->np.ndarray:
    """Element-wise logistic sigmoid 1 / (1 + exp(-x)).

    Bug fix: the body referenced the undefined name ``vector`` instead of
    the parameter ``_UpperCamelCase`` (NameError at runtime).
    """
    return 1 / (1 + np.exp(-_UpperCamelCase ))
def __lowercase ( _UpperCamelCase ) ->np.ndarray:
    """Sigmoid linear unit (swish): x * sigmoid(x).

    Bug fix: the body referenced the undefined names ``vector`` and
    ``sigmoid`` (the sibling was renamed away); the sigmoid is inlined so
    the function is self-contained: x * 1/(1+exp(-x)) == x/(1+exp(-x)).
    """
    return _UpperCamelCase / (1 + np.exp(-_UpperCamelCase ))
if __name__ == "__main__":
    # Run any doctests in this module when executed as a script.
    import doctest

    doctest.testmod()
| 319
|
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
# NOTE(review): all four module-level constants below are bound to the same
# name ``__a`` — each assignment shadows the previous one, so the logger and
# the first two mappings are unreachable by the class attributes that expect
# VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP / ..._SIZES. This looks like
# automated-renaming damage; restore distinct names before use.
__a = logging.get_logger(__name__)
# Canonical vocab-file name expected next to a saved tokenizer.
__a = {'''vocab_file''': '''vocab.txt'''}
# Hub URLs of the pretrained ESM-2 vocab files.
__a = {
    '''vocab_file''': {
        '''facebook/esm2_t6_8M_UR50D''': '''https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt''',
        '''facebook/esm2_t12_35M_UR50D''': '''https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt''',
    },
}
# Maximum positional-embedding sizes of the pretrained checkpoints.
__a = {
    '''facebook/esm2_t6_8M_UR50D''': 10_24,
    '''facebook/esm2_t12_35M_UR50D''': 10_24,
}
def __lowercase ( _UpperCamelCase ) ->Tuple:
"""simple docstring"""
with open(_UpperCamelCase, '''r''' ) as f:
lowercase : List[Any] = f.read().splitlines()
return [l.strip() for l in lines]
class __SCREAMING_SNAKE_CASE ( A__ ):
    """ESM protein-language-model tokenizer: whitespace split over a flat vocab.

    NOTE(review): this class is corrupted by automated renaming — the four
    class attributes are all bound to the same name ``A`` (each shadows the
    previous), ``__init__`` declares six parameters that are all named
    ``SCREAMING_SNAKE_CASE__`` (a SyntaxError) and binds the loaded vocab to
    locals instead of ``self.all_tokens``/``self._id_to_token``/
    ``self._token_to_id``, and several method signatures below repeat the
    same parameter name. Restore from the original ESM tokenizer before use.
    """
    A : Dict = VOCAB_FILES_NAMES
    A : List[str] = PRETRAINED_VOCAB_FILES_MAP
    A : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    A : List[str] = ['input_ids', 'attention_mask']

    def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<cls>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__="<mask>" , SCREAMING_SNAKE_CASE__="<eos>" , **SCREAMING_SNAKE_CASE__ , ):
        super().__init__(**SCREAMING_SNAKE_CASE__ )
        # NOTE(review): results below are bound to the local ``lowercase``
        # while ``self.all_tokens`` etc. are read later — renaming damage.
        lowercase : str = load_vocab_file(SCREAMING_SNAKE_CASE__ )
        lowercase : List[Any] = dict(enumerate(self.all_tokens ) )
        lowercase : Tuple = {tok: ind for ind, tok in enumerate(self.all_tokens )}
        lowercase : Tuple = unk_token
        lowercase : Optional[Any] = cls_token
        lowercase : Union[str, Any] = pad_token
        lowercase : Dict = mask_token
        lowercase : Dict = eos_token
        lowercase : Any = self.all_tokens
        self._create_trie(self.unique_no_split_tokens )

    # id -> token (falls back to the unknown token).
    def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
        return self._id_to_token.get(SCREAMING_SNAKE_CASE__ , self.unk_token )

    # token -> id (falls back to the unknown-token id).
    def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
        return self._token_to_id.get(SCREAMING_SNAKE_CASE__ , self._token_to_id.get(self.unk_token ) )

    # Tokenization is a plain whitespace split.
    # NOTE(review): ``text`` is undefined here — the parameter was renamed.
    def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
        return text.split()

    # Vocabulary size (added-tokens flag is ignored).
    def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__=False ):
        return len(self._id_to_token )

    # Full token -> index mapping.
    def __lowerCamelCase ( self ):
        return {token: i for i, token in enumerate(self.all_tokens )}

    def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
        return self._token_to_id.get(SCREAMING_SNAKE_CASE__ , self._token_to_id.get(self.unk_token ) )

    def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
        return self._id_to_token.get(SCREAMING_SNAKE_CASE__ , self.unk_token )

    # Build <cls> A <eos> (<eos> doubles as the separator; ESM has no <sep>).
    # NOTE(review): both sequence parameters share one name — SyntaxError.
    def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
        lowercase : List[str] = [self.cls_token_id]
        lowercase : Dict = [self.eos_token_id] # No sep token in ESM vocabulary
        if token_ids_a is None:
            if self.eos_token_id is None:
                return cls + token_ids_a
            else:
                return cls + token_ids_a + sep
        elif self.eos_token_id is None:
            raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' )
        return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token

    # Mask of special (1) vs. regular (0) tokens.
    def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False ):
        if already_has_special_tokens:
            if token_ids_a is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.''' )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
        lowercase : Tuple = [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
        if token_ids_a is not None:
            mask += [0] * len(SCREAMING_SNAKE_CASE__ ) + [1]
        return mask

    # Write the flat vocab (one token per line) into the target directory.
    def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
        lowercase : List[str] = os.path.join(SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' )
        with open(SCREAMING_SNAKE_CASE__ , '''w''' ) as f:
            f.write('''\n'''.join(self.all_tokens ) )
        return (vocab_file,)

    @property
    def __lowerCamelCase ( self ):
        return self.get_vocab_size(with_added_tokens=SCREAMING_SNAKE_CASE__ )

    def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = False ):
        return super()._add_tokens(SCREAMING_SNAKE_CASE__ , special_tokens=SCREAMING_SNAKE_CASE__ )
| 319
| 1
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both constants share the name ``_SCREAMING_SNAKE_CASE`` — the
# second assignment shadows the logger created by the first.
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)

# Hub config URL(s) of pretrained wav2vec2 checkpoints.
_SCREAMING_SNAKE_CASE = {
    """facebook/wav2vec2-base-960h""": """https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json""",
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ ):
    '''Configuration class for a wav2vec2 speech model.

    NOTE(review): this class is corrupted by automated renaming — every one
    of the ~60 ``__init__`` parameters is named ``lowerCAmelCase__`` (duplicate
    parameter names are a SyntaxError), and every attribute assignment binds
    to the throwaway local ``_UpperCamelCase`` instead of ``self.<attr>``,
    while the validation below reads ``self.conv_stride`` etc. Restore from
    the original ``Wav2Vec2Config`` before use; the section comments below
    describe the intended attribute groups.
    '''
    __UpperCAmelCase = """wav2vec2"""

    def __init__(self , lowerCAmelCase__=32 , lowerCAmelCase__=7_68 , lowerCAmelCase__=12 , lowerCAmelCase__=12 , lowerCAmelCase__=30_72 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-5 , lowerCAmelCase__="group" , lowerCAmelCase__="gelu" , lowerCAmelCase__=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , lowerCAmelCase__=(5, 2, 2, 2, 2, 2, 2) , lowerCAmelCase__=(10, 3, 3, 3, 3, 2, 2) , lowerCAmelCase__=False , lowerCAmelCase__=1_28 , lowerCAmelCase__=16 , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=0.05 , lowerCAmelCase__=10 , lowerCAmelCase__=2 , lowerCAmelCase__=0.0 , lowerCAmelCase__=10 , lowerCAmelCase__=0 , lowerCAmelCase__=3_20 , lowerCAmelCase__=2 , lowerCAmelCase__=0.1 , lowerCAmelCase__=1_00 , lowerCAmelCase__=2_56 , lowerCAmelCase__=2_56 , lowerCAmelCase__=0.1 , lowerCAmelCase__="sum" , lowerCAmelCase__=False , lowerCAmelCase__=False , lowerCAmelCase__=2_56 , lowerCAmelCase__=(5_12, 5_12, 5_12, 5_12, 15_00) , lowerCAmelCase__=(5, 3, 3, 1, 1) , lowerCAmelCase__=(1, 2, 3, 1, 1) , lowerCAmelCase__=5_12 , lowerCAmelCase__=0 , lowerCAmelCase__=1 , lowerCAmelCase__=2 , lowerCAmelCase__=False , lowerCAmelCase__=3 , lowerCAmelCase__=2 , lowerCAmelCase__=3 , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__ , ):
        '''Initialize the configuration; all assignments below were intended
        to set the attributes named on their right-hand side.'''
        super().__init__(**lowerCAmelCase__ , pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ )
        # Transformer encoder / feature-extractor hyper-parameters.
        _UpperCamelCase : Any = hidden_size
        _UpperCamelCase : int = feat_extract_norm
        _UpperCamelCase : List[str] = feat_extract_activation
        _UpperCamelCase : List[Any] = list(lowerCAmelCase__ )
        _UpperCamelCase : Optional[Any] = list(lowerCAmelCase__ )
        _UpperCamelCase : Tuple = list(lowerCAmelCase__ )
        _UpperCamelCase : str = conv_bias
        _UpperCamelCase : int = num_conv_pos_embeddings
        _UpperCamelCase : Dict = num_conv_pos_embedding_groups
        _UpperCamelCase : Union[str, Any] = len(self.conv_dim )
        _UpperCamelCase : Dict = num_hidden_layers
        _UpperCamelCase : Union[str, Any] = intermediate_size
        _UpperCamelCase : List[str] = hidden_act
        _UpperCamelCase : Optional[Any] = num_attention_heads
        _UpperCamelCase : str = hidden_dropout
        _UpperCamelCase : str = attention_dropout
        _UpperCamelCase : List[Any] = activation_dropout
        _UpperCamelCase : List[str] = feat_proj_dropout
        _UpperCamelCase : List[str] = final_dropout
        _UpperCamelCase : Optional[int] = layerdrop
        _UpperCamelCase : List[Any] = layer_norm_eps
        _UpperCamelCase : Optional[Any] = initializer_range
        _UpperCamelCase : Dict = vocab_size
        _UpperCamelCase : Tuple = do_stable_layer_norm
        _UpperCamelCase : int = use_weighted_layer_sum
        # The three convolutional spec lists must have equal lengths.
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
                F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        _UpperCamelCase : Any = apply_spec_augment
        _UpperCamelCase : Union[str, Any] = mask_time_prob
        _UpperCamelCase : Optional[int] = mask_time_length
        _UpperCamelCase : Any = mask_time_min_masks
        _UpperCamelCase : List[str] = mask_feature_prob
        _UpperCamelCase : Tuple = mask_feature_length
        _UpperCamelCase : int = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        _UpperCamelCase : List[Any] = num_codevectors_per_group
        _UpperCamelCase : int = num_codevector_groups
        _UpperCamelCase : Optional[Any] = contrastive_logits_temperature
        _UpperCamelCase : Union[str, Any] = feat_quantizer_dropout
        _UpperCamelCase : str = num_negatives
        _UpperCamelCase : Tuple = codevector_dim
        _UpperCamelCase : Tuple = proj_codevector_dim
        _UpperCamelCase : str = diversity_loss_weight
        # ctc loss
        _UpperCamelCase : Any = ctc_loss_reduction
        _UpperCamelCase : Tuple = ctc_zero_infinity
        # adapter
        _UpperCamelCase : List[str] = add_adapter
        _UpperCamelCase : Optional[Any] = adapter_kernel_size
        _UpperCamelCase : Union[str, Any] = adapter_stride
        _UpperCamelCase : Tuple = num_adapter_layers
        _UpperCamelCase : Tuple = output_hidden_size or hidden_size
        _UpperCamelCase : Optional[Any] = adapter_attn_dim
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        _UpperCamelCase : Optional[Any] = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        _UpperCamelCase : int = list(lowerCAmelCase__ )
        _UpperCamelCase : List[Any] = list(lowerCAmelCase__ )
        _UpperCamelCase : Tuple = list(lowerCAmelCase__ )
        _UpperCamelCase : Dict = xvector_output_dim

    @property
    def lowercase_ (self ):
        '''Total stride of the convolutional feature extractor (product of
        the per-layer strides): input samples per output frame.'''
        return functools.reduce(operator.mul , self.conv_stride , 1 )
| 239
|
"""simple docstring"""
from itertools import count
def __lowerCAmelCase ( __lowerCAmelCase : int = 50 ) -> int:
    """Project Euler 115: smallest row length n whose fill-count F(m, n)
    first exceeds one million, for minimum block length m = ``__lowerCAmelCase``.

    Bug fix: the original bound the fill-count table to a throwaway local
    and then read the undefined names ``min_block_length`` and
    ``fill_count_functions`` (NameError at runtime).
    """
    min_block_length = __lowerCAmelCase
    # fill_count_functions[k] = number of ways to fill a row of length k;
    # a row shorter than the minimum block length can only stay empty.
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length ):
        fill_count_functions.append(1 )
        # Count placements of a first block of each admissible length/offset,
        # plus the all-empty configuration.
        for block_length in range(min_block_length , n + 1 ):
            for block_start in range(n - block_length ):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1000000:
            break
    return n
if __name__ == "__main__":
    # Bug fix: this module defines `__lowerCAmelCase`, not `solution`, so the
    # original call raised NameError when run as a script.
    print(f'{__lowerCAmelCase() = }')
| 239
| 1
|
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
# NOTE(review): all three module-level constants below are bound to the same
# name ``SCREAMING_SNAKE_CASE_`` — each assignment shadows the previous one,
# while the functions below read ``logger``, ``FEATURE_EXTRACTOR_MAPPING_NAMES``
# and ``FEATURE_EXTRACTOR_MAPPING``, which are never bound. Renaming damage;
# restore distinct names before use.
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)

# model_type -> feature-extractor class name, in registration order.
SCREAMING_SNAKE_CASE_ = OrderedDict(
    [
        ('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
        ('''beit''', '''BeitFeatureExtractor'''),
        ('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
        ('''clap''', '''ClapFeatureExtractor'''),
        ('''clip''', '''CLIPFeatureExtractor'''),
        ('''clipseg''', '''ViTFeatureExtractor'''),
        ('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
        ('''convnext''', '''ConvNextFeatureExtractor'''),
        ('''cvt''', '''ConvNextFeatureExtractor'''),
        ('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
        ('''data2vec-vision''', '''BeitFeatureExtractor'''),
        ('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
        ('''deit''', '''DeiTFeatureExtractor'''),
        ('''detr''', '''DetrFeatureExtractor'''),
        ('''dinat''', '''ViTFeatureExtractor'''),
        ('''donut-swin''', '''DonutFeatureExtractor'''),
        ('''dpt''', '''DPTFeatureExtractor'''),
        ('''encodec''', '''EncodecFeatureExtractor'''),
        ('''flava''', '''FlavaFeatureExtractor'''),
        ('''glpn''', '''GLPNFeatureExtractor'''),
        ('''groupvit''', '''CLIPFeatureExtractor'''),
        ('''hubert''', '''Wav2Vec2FeatureExtractor'''),
        ('''imagegpt''', '''ImageGPTFeatureExtractor'''),
        ('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
        ('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
        ('''levit''', '''LevitFeatureExtractor'''),
        ('''maskformer''', '''MaskFormerFeatureExtractor'''),
        ('''mctct''', '''MCTCTFeatureExtractor'''),
        ('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
        ('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
        ('''mobilevit''', '''MobileViTFeatureExtractor'''),
        ('''nat''', '''ViTFeatureExtractor'''),
        ('''owlvit''', '''OwlViTFeatureExtractor'''),
        ('''perceiver''', '''PerceiverFeatureExtractor'''),
        ('''poolformer''', '''PoolFormerFeatureExtractor'''),
        ('''regnet''', '''ConvNextFeatureExtractor'''),
        ('''resnet''', '''ConvNextFeatureExtractor'''),
        ('''segformer''', '''SegformerFeatureExtractor'''),
        ('''sew''', '''Wav2Vec2FeatureExtractor'''),
        ('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
        ('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
        ('''speecht5''', '''SpeechT5FeatureExtractor'''),
        ('''swiftformer''', '''ViTFeatureExtractor'''),
        ('''swin''', '''ViTFeatureExtractor'''),
        ('''swinv2''', '''ViTFeatureExtractor'''),
        ('''table-transformer''', '''DetrFeatureExtractor'''),
        ('''timesformer''', '''VideoMAEFeatureExtractor'''),
        ('''tvlt''', '''TvltFeatureExtractor'''),
        ('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
        ('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
        ('''van''', '''ConvNextFeatureExtractor'''),
        ('''videomae''', '''VideoMAEFeatureExtractor'''),
        ('''vilt''', '''ViltFeatureExtractor'''),
        ('''vit''', '''ViTFeatureExtractor'''),
        ('''vit_mae''', '''ViTFeatureExtractor'''),
        ('''vit_msn''', '''ViTFeatureExtractor'''),
        ('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
        ('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
        ('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
        ('''whisper''', '''WhisperFeatureExtractor'''),
        ('''xclip''', '''CLIPFeatureExtractor'''),
        ('''yolos''', '''YolosFeatureExtractor'''),
    ]
)

# Lazy config-class <-> feature-extractor-class mapping used by AutoFeatureExtractor.
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def lowercase__ ( lowerCAmelCase : Optional[int] ) -> Optional[int]:
    """Resolve a feature-extractor class *name* to the actual class object.

    Looks through the static model-type mapping, then through classes
    registered at runtime, and finally through the main ``transformers``
    module (where dummy objects live when a dependency is missing, yielding
    a helpful error message). Returns None when nothing matches.

    Bug fix: the original body referenced the undefined names ``class_name``
    and ``lowerCAmelCase__`` instead of the parameter ``lowerCAmelCase``.
    """
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if lowerCAmelCase in extractors:
            module_name = model_type_to_module_name(module_name )
            module = importlib.import_module(F".{module_name}" , 'transformers.models' )
            try:
                return getattr(module , lowerCAmelCase )
            except AttributeError:
                continue
    # Classes registered dynamically via AutoFeatureExtractor.register(...).
    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor , '__name__' , None ) == lowerCAmelCase:
            return extractor
    # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('transformers' )
    if hasattr(main_module , lowerCAmelCase ):
        return getattr(main_module , lowerCAmelCase )
    return None
def lowercase__ ( lowerCAmelCase : int , lowerCAmelCase : Any = None , lowerCAmelCase : Optional[int] = False , lowerCAmelCase : Optional[int] = False , lowerCAmelCase : Dict = None , lowerCAmelCase : List[Any] = None , lowerCAmelCase : Any = None , lowerCAmelCase : Dict = False , **lowerCAmelCase : Union[str, Any] , ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = get_file_from_repo(
lowerCAmelCase__ , lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , force_download=lowerCAmelCase__ , resume_download=lowerCAmelCase__ , proxies=lowerCAmelCase__ , use_auth_token=lowerCAmelCase__ , revision=lowerCAmelCase__ , local_files_only=lowerCAmelCase__ , )
if resolved_config_file is None:
logger.info(
'Could not locate the feature extractor configuration file, will try to use the model config instead.' )
return {}
with open(lowerCAmelCase__ , encoding='utf-8' ) as reader:
return json.load(lowerCAmelCase__ )
class _UpperCAmelCase :
    """Auto-class that instantiates the correct feature extractor for a model.

    NOTE(review): this class is corrupted by automated renaming — almost every
    argument reference inside ``from_pretrained``/``register`` is the
    undefined name ``_snake_case`` while the declared parameters are
    ``lowercase_``, and several intermediate results are rebound to the same
    local ``UpperCAmelCase``. The control flow documented below is the
    intended behaviour; restore names from the original
    ``AutoFeatureExtractor`` before use.
    """

    def __init__( self ) -> List[str]:
        # Instantiation is forbidden; only from_pretrained() is supported.
        raise EnvironmentError(
            'AutoFeatureExtractor is designed to be instantiated '
            'using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.' )

    @classmethod
    @replace_list_option_in_docstrings(_snake_case )
    def a_ ( cls , lowercase_ , **lowercase_ ) -> List[Any]:
        # Resolve the feature-extractor class from (in order): the feature
        # extractor config, the model config, remote code, or the mapping.
        UpperCAmelCase = kwargs.pop('config' , _snake_case )
        UpperCAmelCase = kwargs.pop('trust_remote_code' , _snake_case )
        UpperCAmelCase = True
        UpperCAmelCase = FeatureExtractionMixin.get_feature_extractor_dict(_snake_case , **_snake_case )
        UpperCAmelCase = config_dict.get('feature_extractor_type' , _snake_case )
        UpperCAmelCase = None
        if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ):
            UpperCAmelCase = config_dict["auto_map"]["AutoFeatureExtractor"]
        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(_snake_case , _snake_case ):
                UpperCAmelCase = AutoConfig.from_pretrained(_snake_case , **_snake_case )
            # It could be in `config.feature_extractor_type``
            UpperCAmelCase = getattr(_snake_case , 'feature_extractor_type' , _snake_case )
            if hasattr(_snake_case , 'auto_map' ) and "AutoFeatureExtractor" in config.auto_map:
                UpperCAmelCase = config.auto_map["AutoFeatureExtractor"]
        if feature_extractor_class is not None:
            UpperCAmelCase = feature_extractor_class_from_name(_snake_case )
        UpperCAmelCase = feature_extractor_auto_map is not None
        UpperCAmelCase = feature_extractor_class is not None or type(_snake_case ) in FEATURE_EXTRACTOR_MAPPING
        UpperCAmelCase = resolve_trust_remote_code(
            _snake_case , _snake_case , _snake_case , _snake_case )
        if has_remote_code and trust_remote_code:
            # Trust-remote-code path: fetch the class from the hub repo.
            UpperCAmelCase = get_class_from_dynamic_module(
                _snake_case , _snake_case , **_snake_case )
            UpperCAmelCase = kwargs.pop('code_revision' , _snake_case )
            if os.path.isdir(_snake_case ):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(_snake_case , **_snake_case )
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(_snake_case , **_snake_case )
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(_snake_case ) in FEATURE_EXTRACTOR_MAPPING:
            UpperCAmelCase = FEATURE_EXTRACTOR_MAPPING[type(_snake_case )]
            return feature_extractor_class.from_dict(_snake_case , **_snake_case )
        raise ValueError(
            F"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            F"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            F"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}" )

    @staticmethod
    def a_ ( lowercase_ , lowercase_ ) -> Union[str, Any]:
        # Register a new (config class, feature-extractor class) pair.
        FEATURE_EXTRACTOR_MAPPING.register(_snake_case , _snake_case )
| 373
|
from ..utils import is_flax_available, is_torch_available

# Torch-backed model classes (only importable when torch is installed).
if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_ad import DualTransformeraDModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .ta_film_transformer import TaFilmDecoder
    from .transformer_ad import TransformeraDModel
    # Bug fix: the `unet_ad` and `unet_ad_condition` imports were each
    # duplicated verbatim; the redundant copies are removed.
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
    from .vq_model import VQModel

# Flax-backed model classes (only importable when flax is installed).
if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_ad_condition_flax import FlaxUNetaDConditionModel
    from .vae_flax import FlaxAutoencoderKL
| 424
| 0
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( _UpperCamelCase ):
    """Processor that pairs a BLIP image processor with an auto tokenizer.

    Wraps both so a single ``__call__`` can produce image tensors, text
    encodings, or both merged into one ``BatchEncoding``.
    """
    _UpperCamelCase : List[Any] = ["""image_processor""", """tokenizer"""]
    _UpperCamelCase : List[Any] = """BlipImageProcessor"""
    _UpperCamelCase : Optional[Any] = """AutoTokenizer"""

    def __init__( self , snake_case , snake_case ) -> Union[str, Any]:
        """Store the two sub-processors on the mixin.

        NOTE(review): the bare ``False`` is bound to a throwaway local —
        presumably it was meant to configure the tokenizer (e.g. disabling
        token-type ids) before ``super().__init__``; confirm against the
        original processor implementation.
        """
        a__ : Optional[Any] = False
        super().__init__(snake_case , snake_case )
        a__ : int = self.image_processor

    def __call__( self , snake_case = None , snake_case = None , snake_case = True , snake_case = False , snake_case = None , snake_case = None , snake_case = 0 , snake_case = None , snake_case = None , snake_case = False , snake_case = False , snake_case = False , snake_case = False , snake_case = False , snake_case = True , snake_case = None , **snake_case , ) -> BatchEncoding:
        """Encode images and/or text into a single BatchEncoding.

        NOTE(review): every parameter is named ``snake_case`` (duplicate
        parameter names are a SyntaxError) — the original named arguments
        (images, text, padding, truncation, ...) were lost to renaming.
        """
        if images is None and text is None:
            raise ValueError("You have to specify either images or text." )
        # Get only text
        if images is None:
            a__ : Optional[int] = self.tokenizer
            a__ : Optional[int] = self.tokenizer(
                text=snake_case , add_special_tokens=snake_case , padding=snake_case , truncation=snake_case , max_length=snake_case , stride=snake_case , pad_to_multiple_of=snake_case , return_attention_mask=snake_case , return_overflowing_tokens=snake_case , return_special_tokens_mask=snake_case , return_offsets_mapping=snake_case , return_token_type_ids=snake_case , return_length=snake_case , verbose=snake_case , return_tensors=snake_case , **snake_case , )
            return text_encoding
        # add pixel_values
        a__ : Dict = self.image_processor(snake_case , return_tensors=snake_case )
        if text is not None:
            a__ : Tuple = self.tokenizer(
                text=snake_case , add_special_tokens=snake_case , padding=snake_case , truncation=snake_case , max_length=snake_case , stride=snake_case , pad_to_multiple_of=snake_case , return_attention_mask=snake_case , return_overflowing_tokens=snake_case , return_special_tokens_mask=snake_case , return_offsets_mapping=snake_case , return_token_type_ids=snake_case , return_length=snake_case , verbose=snake_case , return_tensors=snake_case , **snake_case , )
        else:
            a__ : Optional[int] = None
        if text_encoding is not None:
            encoding_image_processor.update(snake_case )
        return encoding_image_processor

    def _snake_case ( self , *snake_case , **snake_case ) -> Tuple:
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*snake_case , **snake_case )

    def _snake_case ( self , *snake_case , **snake_case ) -> List[Any]:
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*snake_case , **snake_case )

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def _snake_case ( self ) -> Optional[Any]:
        """Union of tokenizer and image-processor input names, de-duplicated
        while preserving order."""
        a__ : Union[str, Any] = self.tokenizer.model_input_names
        a__ : str = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 629
|
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger (shadowed by the next assignment to the same mangled name).
SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__)

# Hub config URL(s) of pretrained Informer checkpoints.
SCREAMING_SNAKE_CASE__ : str = {
    """huggingface/informer-tourism-monthly""": (
        """https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}
# Configuration class for the Informer time-series model (model_type "informer").
# NOTE(review): this file is machine-mangled. Every parameter of __init__ is
# literally named `snake_case` (duplicate argument names are a SyntaxError in
# Python) and the body's right-hand sides reference the *intended* upstream
# parameter names (prediction_length, d_model, ...), which are undefined here.
# The code is left byte-identical; reconcile against the upstream
# `InformerConfig` before use.
class __lowerCAmelCase ( _UpperCamelCase ):
    """Mangled stand-in for `transformers.InformerConfig` — see NOTE above."""

    # HF model-type key used for AutoConfig dispatch.
    _UpperCamelCase : Optional[Any] = """informer"""
    # Maps generic config attribute names onto this config's field names.
    _UpperCamelCase : Any = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
        """num_hidden_layers""": """encoder_layers""",
    }

    def __init__( self , snake_case = None , snake_case = None , snake_case = "student_t" , snake_case = "nll" , snake_case = 1 , snake_case = None , snake_case = "mean" , snake_case = 0 , snake_case = 0 , snake_case = 0 , snake_case = 0 , snake_case = None , snake_case = None , snake_case = 64 , snake_case = 32 , snake_case = 32 , snake_case = 2 , snake_case = 2 , snake_case = 2 , snake_case = 2 , snake_case = True , snake_case = "gelu" , snake_case = 0.05 , snake_case = 0.1 , snake_case = 0.1 , snake_case = 0.1 , snake_case = 0.1 , snake_case = 100 , snake_case = 0.02 , snake_case=True , snake_case = "prob" , snake_case = 5 , snake_case = True , **snake_case , ) -> Union[str, Any]:
        """Initialize the config; parameter names are mangled — see class NOTE."""
        # Time-series fields (upstream: prediction/context windows, loss, scaling).
        a__ : Optional[Any] = prediction_length
        a__ : Optional[int] = context_length or prediction_length
        a__ : Optional[int] = distribution_output
        a__ : str = loss
        a__ : Optional[Any] = input_size
        a__ : int = num_time_features
        # Default lag indices used to build lagged input features.
        a__ : Optional[int] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        a__ : Optional[int] = scaling
        a__ : List[str] = num_dynamic_real_features
        a__ : Optional[int] = num_static_real_features
        a__ : Optional[int] = num_static_categorical_features
        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(snake_case ) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`" )
            a__ : List[Any] = cardinality
        else:
            a__ : Tuple = [0]
        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(snake_case ) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
            a__ : Tuple = embedding_dimension
        else:
            # Heuristic embedding size per categorical feature, capped at 50.
            a__ : int = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        a__ : Optional[Any] = num_parallel_samples
        # Transformer architecture configuration
        # NOTE(review): `self._number_of_features` is referenced but the property
        # below is (mangled to) `_snake_case` — upstream it is `_number_of_features`.
        a__ : int = input_size * len(self.lags_sequence ) + self._number_of_features
        a__ : Union[str, Any] = d_model
        a__ : Any = encoder_attention_heads
        a__ : Optional[Any] = decoder_attention_heads
        a__ : int = encoder_ffn_dim
        a__ : List[Any] = decoder_ffn_dim
        a__ : List[str] = encoder_layers
        a__ : Any = decoder_layers
        a__ : List[str] = dropout
        a__ : int = attention_dropout
        a__ : List[Any] = activation_dropout
        a__ : Optional[int] = encoder_layerdrop
        a__ : Tuple = decoder_layerdrop
        a__ : Any = activation_function
        a__ : Tuple = init_std
        a__ : Optional[int] = use_cache
        # Informer
        a__ : Union[str, Any] = attention_type
        a__ : List[str] = sampling_factor
        a__ : Optional[int] = distil
        super().__init__(is_encoder_decoder=snake_case , **snake_case )

    @property
    def _snake_case ( self ) -> int:
        """Total width of the per-timestep feature vector fed to the transformer."""
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 629
| 1
|
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
SCREAMING_SNAKE_CASE = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
SCREAMING_SNAKE_CASE = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
SCREAMING_SNAKE_CASE = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
    """ROUGE metric: wraps Google Research's `rouge_score` package to score
    predicted summaries against references (rouge1/rouge2/rougeL/rougeLsum)."""

    def A__ ( self : Union[str, Any] ) -> str:
        """Describe the metric: input features, citation and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                    '''references''': datasets.Value('''string''' , id='''sequence''' ),
                } ) , codebase_urls=['''https://github.com/google-research/google-research/tree/master/rouge'''] , reference_urls=[
                '''https://en.wikipedia.org/wiki/ROUGE_(metric)''',
                '''https://github.com/google-research/google-research/tree/master/rouge''',
            ] , )

    def A__ ( self , predictions , references , rouge_types=None , use_aggregator=True , use_stemmer=False ):
        """Compute ROUGE scores for `predictions` against `references`.

        Fix: the original signature named all five parameters identically
        (duplicate argument name -> SyntaxError) and consequently passed the
        same mangled name for every argument inside the body. Restored distinct
        parameter names and the (reference, prediction) order expected by
        `RougeScorer.score`.

        Returns a dict of AggregateScore objects when `use_aggregator` is True,
        otherwise a dict of per-example score lists.
        """
        if rouge_types is None:
            # Default ROUGE variants, matching the upstream datasets metric.
            rouge_types = ['''rouge1''', '''rouge2''', '''rougeL''', '''rougeLsum''']
        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types , use_stemmer=use_stemmer )
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []
        for ref, pred in zip(references , predictions ):
            score = scorer.score(ref , pred )
            if use_aggregator:
                aggregator.add_scores(score )
            else:
                scores.append(score )
        if use_aggregator:
            result = aggregator.aggregate()
        else:
            # Transpose the per-example dicts into {rouge_type: [scores...]}.
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]
        return result
| 94
|
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
# Abstract base class for CLI subcommands: a static factory that registers the
# subcommand on an ArgumentParser, plus an abstract execution method.
# NOTE(review): both abstract methods carry the same mangled name
# `_UpperCAmelCase`, so the second definition shadows the first at
# class-creation time — upstream these are `register_subcommand` and `run`;
# confirm before relying on the ABC contract.
class A ( SCREAMING_SNAKE_CASE__ ):
    """Mangled stand-in for an abstract CLI-command base class — see NOTE above."""

    @staticmethod
    @abstractmethod
    def _UpperCAmelCase ( __lowerCAmelCase ):
        # Must be overridden: wire this command's subparser onto the parser.
        raise NotImplementedError()

    @abstractmethod
    def _UpperCAmelCase ( self ):
        # Must be overridden: execute the command.
        raise NotImplementedError()
| 208
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazy-import structure: maps submodule name -> list of public symbols.
# Fix: the original built the structure in `_lowercase`, then overwrote it with
# a plain list, and finally referenced an undefined `_import_structure` name in
# the `_LazyModule` call (NameError at import time). Restored the canonical
# transformers lazy-module pattern.
_import_structure = {}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
    # Static type checkers see the real imports; runtime stays lazy.
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer
else:
    import sys
    _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 718
|
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
# Helper that holds the hyperparameters for ImageGPT image-processor tests.
# NOTE(review): machine-mangled — every __init__ parameter is named `A_`
# (duplicate argument names are a SyntaxError), and the body references the
# intended upstream names (parent, batch_size, ...), which are undefined here.
# Left byte-identical; reconcile with the upstream ImageGPT test helper.
class lowerCAmelCase_ ( unittest.TestCase ):
    """Mangled stand-in for `ImageGPTImageProcessingTester` — see NOTE above."""

    def __init__( self : int ,A_ : Tuple ,A_ : str=7 ,A_ : Tuple=3 ,A_ : List[Any]=18 ,A_ : List[str]=30 ,A_ : Optional[Any]=400 ,A_ : Any=True ,A_ : Optional[Any]=None ,A_ : List[str]=True ,) -> str:
        # Default target size used by the resize step.
        A = size if size is not None else {'height': 18, 'width': 18}
        A = parent
        A = batch_size
        A = num_channels
        A = image_size
        A = min_resolution
        A = max_resolution
        A = do_resize
        A = size
        A = do_normalize

    def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
        """Build the kwargs dict used to instantiate the image processor under test."""
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04],
                    [-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96],
                ] ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
# Unit tests for ImageGPTImageProcessor: attribute presence, from_dict
# overrides, and JSON / save_pretrained round-trips (with special handling for
# the numpy `clusters` array, which needs array-aware equality).
# NOTE(review): mangled — the base `_lowercase` and the helper class name
# `ImageGPTImageProcessingTester` are not defined under those names in this
# file; upstream these are `ImageProcessingSavingTestMixin` and the tester
# class above. Left byte-identical.
@require_torch
@require_vision
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
    """ImageGPT image-processor unit tests — see NOTE above."""

    _lowerCamelCase: List[Any] = ImageGPTImageProcessor if is_vision_available() else None

    def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
        # Per-test fixture holder with default hyperparameters.
        A = ImageGPTImageProcessingTester(self )

    @property
    def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
        # Convenience accessor for the processor-constructor kwargs.
        return self.image_processor_tester.prepare_image_processor_dict()

    def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
        """The processor exposes all expected configuration attributes."""
        A = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(A_ ,'clusters' ) )
        self.assertTrue(hasattr(A_ ,'do_resize' ) )
        self.assertTrue(hasattr(A_ ,'size' ) )
        self.assertTrue(hasattr(A_ ,'do_normalize' ) )

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
        """from_dict honours defaults and explicit `size` overrides."""
        A = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size ,{'height': 18, 'width': 18} )
        A = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
        self.assertEqual(image_processor.size ,{'height': 42, 'width': 42} )

    def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
        """to_json_string round-trips every field; numpy arrays compared elementwise."""
        A = self.image_processing_class(**self.image_processor_dict )
        A = json.loads(image_processor.to_json_string() )
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(A_ ,obj[key] ) )
            else:
                self.assertEqual(obj[key] ,A_ )

    def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
        """to_json_file / from_json_file round-trip preserves the full config."""
        A = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            A = os.path.join(A_ ,'image_processor.json' )
            image_processor_first.to_json_file(A_ )
            A = self.image_processing_class.from_json_file(A_ ).to_dict()
            A = image_processor_first.to_dict()
            for key, value in image_processor_first.items():
                if key == "clusters":
                    self.assertTrue(np.array_equal(A_ ,image_processor_second[key] ) )
                else:
                    self.assertEqual(image_processor_first[key] ,A_ )

    def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
        """save_pretrained / from_pretrained round-trip preserves the full config."""
        A = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(A_ )
            A = self.image_processing_class.from_pretrained(A_ ).to_dict()
            A = image_processor_first.to_dict()
            for key, value in image_processor_first.items():
                if key == "clusters":
                    self.assertTrue(np.array_equal(A_ ,image_processor_second[key] ) )
                else:
                    self.assertEqual(image_processor_first[key] ,A_ )

    @unittest.skip('ImageGPT requires clusters at initialization' )
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
        pass
def _snake_case():
    """Load two fixture images from the hub dataset for the slow integration test.

    Fix: the original bound both opened images to the same throwaway name and
    then built the list from an undefined `imagea` (NameError at runtime).
    """
    dataset = load_dataset('hf-internal-testing/fixtures_image_utils' , split='test' )
    image_a = Image.open(dataset[4]['file'] )
    image_b = Image.open(dataset[5]['file'] )
    images = [image_a, image_b]
    return images
# Slow integration test: runs the pretrained ImageGPT image processor on two
# fixture images and pins the first/last color-cluster token ids for both the
# non-batched and batched paths.
@require_vision
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
    """ImageGPT image-processor integration test against the small checkpoint."""

    @slow
    def _SCREAMING_SNAKE_CASE ( self : str ) -> int:
        A = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small' )
        A = prepare_images()
        # test non-batched
        A = image_processing(images[0] ,return_tensors='pt' )
        self.assertIsInstance(encoding.input_ids ,torch.LongTensor )
        # One 32x32 image -> 1024 cluster tokens.
        self.assertEqual(encoding.input_ids.shape ,(1, 1024) )
        A = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist() ,A_ )
        # test batched
        A = image_processing(A_ ,return_tensors='pt' )
        self.assertIsInstance(encoding.input_ids ,torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape ,(2, 1024) )
        A = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist() ,A_ )
| 22
| 0
|
'''simple docstring'''
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Map from pretrained Mask2Former checkpoint name to its hosted config.json URL.
_lowerCAmelCase = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
# Fix: the logger was previously assigned to `_lowerCAmelCase` as well,
# clobbering the archive map above and leaving the `logger` name used inside
# the config class (`logger.info`, `logger.warning_once`) undefined.
logger = logging.get_logger(__name__)
# Configuration class for Mask2Former (model_type "mask2former").
# NOTE(review): machine-mangled — every __init__ parameter is named `a__`
# (duplicate argument names are a SyntaxError in Python), while the body's
# right-hand sides use the intended upstream names (backbone_config,
# feature_size, ...). Left byte-identical; reconcile against the upstream
# `Mask2FormerConfig` before use.
class _SCREAMING_SNAKE_CASE ( __a ):
    """Mangled stand-in for `transformers.Mask2FormerConfig` — see NOTE above."""

    __SCREAMING_SNAKE_CASE :Union[str, Any] = """mask2former"""
    # Only Swin backbones are supported by this config.
    __SCREAMING_SNAKE_CASE :Tuple = ["""swin"""]
    __SCREAMING_SNAKE_CASE :List[Any] = {"""hidden_size""": """hidden_dim"""}

    def __init__( self : str , a__ : Optional[Dict] = None , a__ : int = 256 , a__ : int = 256 , a__ : int = 256 , a__ : int = 1024 , a__ : str = "relu" , a__ : int = 6 , a__ : int = 10 , a__ : int = 8 , a__ : float = 0.0 , a__ : int = 2048 , a__ : bool = False , a__ : bool = False , a__ : int = 4 , a__ : int = 255 , a__ : int = 100 , a__ : float = 0.1 , a__ : float = 2.0 , a__ : float = 5.0 , a__ : float = 5.0 , a__ : int = 1_2544 , a__ : float = 3.0 , a__ : float = 0.75 , a__ : float = 0.02 , a__ : float = 1.0 , a__ : bool = True , a__ : List[int] = [4, 8, 16, 32] , a__ : bool = None , **a__ : str , ):
        """Initialize the config; parameter names are mangled — see class NOTE."""
        if backbone_config is None:
            # Fall back to a default Swin-B-like backbone configuration.
            logger.info('''`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.''' )
            __magic_name__ = CONFIG_MAPPING['''swin'''](
                image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=a__ , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
        if isinstance(a__ , a__ ):
            # Accept a plain dict: resolve its model_type to the config class.
            __magic_name__ = backbone_config.pop('''model_type''' )
            __magic_name__ = CONFIG_MAPPING[backbone_model_type]
            __magic_name__ = config_class.from_dict(a__ )
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                F'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '''
                F'''Supported model types: {",".join(self.backbones_supported )}''' )
        __magic_name__ = backbone_config
        __magic_name__ = feature_size
        __magic_name__ = mask_feature_size
        __magic_name__ = hidden_dim
        __magic_name__ = encoder_feedforward_dim
        __magic_name__ = activation_function
        __magic_name__ = encoder_layers
        __magic_name__ = decoder_layers
        __magic_name__ = num_attention_heads
        __magic_name__ = dropout
        __magic_name__ = dim_feedforward
        __magic_name__ = pre_norm
        __magic_name__ = enforce_input_projection
        __magic_name__ = common_stride
        __magic_name__ = ignore_value
        __magic_name__ = num_queries
        __magic_name__ = no_object_weight
        __magic_name__ = class_weight
        __magic_name__ = mask_weight
        __magic_name__ = dice_weight
        __magic_name__ = train_num_points
        __magic_name__ = oversample_ratio
        __magic_name__ = importance_sample_ratio
        __magic_name__ = init_std
        __magic_name__ = init_xavier_std
        __magic_name__ = use_auxiliary_loss
        __magic_name__ = feature_strides
        __magic_name__ = output_auxiliary_logits
        __magic_name__ = decoder_layers
        super().__init__(**a__ )

    @classmethod
    def snake_case__ ( cls : List[Any] , a__ : PretrainedConfig , **a__ : List[str] ):
        """Alternate constructor: build the config from a pre-built backbone config."""
        return cls(
            backbone_config=a__ , **a__ , )

    def snake_case__ ( self : Dict ):
        """Serialize to a plain dict, expanding the nested backbone config."""
        __magic_name__ = copy.deepcopy(self.__dict__ )
        __magic_name__ = self.backbone_config.to_dict()
        __magic_name__ = self.__class__.model_type
        return output
| 432
|
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
# SageMaker release-gate test: launches a single-instance HuggingFace training
# job (PyTorch and TensorFlow variants) and asserts runtime/accuracy/loss KPIs.
# Only runs when the TEST_SAGEMAKER env var is "True".
# NOTE(review): mangled — several method bodies reference `a__` where no such
# parameter exists (e.g. `check=a__`); upstream these are literal True/False
# flags. Left byte-identical.
@pytest.mark.skipif(
    literal_eval(os.getenv("""TEST_SAGEMAKER""" ,"""False""" ) ) is not True ,reason="""Skipping test because should only be run when releasing minor transformers version""" ,)
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
    [
        {
            """framework""": """pytorch""",
            """script""": """run_glue.py""",
            """model_name_or_path""": """distilbert-base-cased""",
            """instance_type""": """ml.g4dn.xlarge""",
            """results""": {"""train_runtime""": 650, """eval_accuracy""": 0.6, """eval_loss""": 0.9},
        },
        {
            """framework""": """tensorflow""",
            """script""": """run_tf.py""",
            """model_name_or_path""": """distilbert-base-cased""",
            """instance_type""": """ml.g4dn.xlarge""",
            """results""": {"""train_runtime""": 600, """eval_accuracy""": 0.3, """eval_loss""": 0.9},
        },
    ] )
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Single-node SageMaker training smoke test — see NOTE above."""

    def snake_case__ ( self : int ):
        # Copy the example training script into the test workspace (PyTorch only).
        if self.framework == "pytorch":
            subprocess.run(
                F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding='''utf-8''' , check=a__ , )
        assert hasattr(self , '''env''' )

    def snake_case__ ( self : str , a__ : int=1 ):
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'''{self.env.base_job_name}-single''' , instance_count=a__ , instance_type=self.instance_type , debugger_hook_config=a__ , hyperparameters={**self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='''py36''' , )

    def snake_case__ ( self : Optional[int] , a__ : Tuple ):
        # Export the training job's metric history to CSV for inspection.
        TrainingJobAnalytics(a__ ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' )

    def snake_case__ ( self : Any ):
        """Run training on one instance and assert the KPI thresholds hold."""
        # create estimator
        __magic_name__ = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        __magic_name__ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        __magic_name__ = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
        __magic_name__ = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        __magic_name__ = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_9999 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
        assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(F'''{estimator.latest_training_job.name}.json''' , '''w''' ) as outfile:
            json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , a__ )
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import structure for the Graphormer model files.
# Fix: the original built the structure dict, overwrote it with a plain list of
# model symbols, and then referenced an undefined `_import_structure` name in
# the `_LazyModule` call (NameError at import time). Restored the canonical
# transformers lazy-module pattern.
_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports; runtime stays lazy.
    from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_graphormer import (
            GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            GraphormerForGraphClassification,
            GraphormerModel,
            GraphormerPreTrainedModel,
        )
else:
    import sys
    UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 599
|
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def lowerCAmelCase__(bpayload: bytes, sampling_rate: int) -> np.array:
    """Decode an audio byte payload to a mono float32 waveform via ffmpeg.

    Fix: both parameters were originally named identically (duplicate argument
    name -> SyntaxError) and the output dtype was mangled (`np.floataa`);
    restored distinct names and `np.float32`.

    Raises ValueError if ffmpeg is missing or the payload decodes to nothing.
    """
    ar = f"""{sampling_rate}"""
    ac = '''1'''
    format_for_conversion = '''f32le'''
    ffmpeg_command = [
        '''ffmpeg''',
        '''-i''',
        '''pipe:0''',
        '''-ac''',
        ac,
        '''-ar''',
        ar,
        '''-f''',
        format_for_conversion,
        '''-hide_banner''',
        '''-loglevel''',
        '''quiet''',
        '''pipe:1''',
    ]
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError('''ffmpeg was not found but is required to load audio files from filename''') from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError('''Malformed soundfile''')
    return audio
def lowerCAmelCase__(sampling_rate: int, chunk_length_s: float, format_for_conversion: str = "f32le"):
    """Yield fixed-size raw audio chunks captured from the default system
    microphone via an ffmpeg subprocess.

    Fix: all three parameters were originally named identically (duplicate
    argument name -> SyntaxError); restored distinct names and the assignments
    the mangled temporaries were standing in for.
    """
    ar = f"""{sampling_rate}"""
    ac = '''1'''
    # Bytes per sample for the requested output encoding.
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""")
    # Pick the capture backend and device per platform.
    system = platform.system()
    if system == "Linux":
        format_ = '''alsa'''
        input_ = '''default'''
    elif system == "Darwin":
        format_ = '''avfoundation'''
        input_ = ''':0'''
    elif system == "Windows":
        format_ = '''dshow'''
        input_ = '''default'''
    ffmpeg_command = [
        '''ffmpeg''',
        '''-f''',
        format_,
        '''-i''',
        input_,
        '''-ac''',
        ac,
        '''-ar''',
        ar,
        '''-f''',
        format_for_conversion,
        '''-fflags''',
        '''nobuffer''',
        '''-hide_banner''',
        '''-loglevel''',
        '''quiet''',
        '''pipe:1''',
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def lowerCAmelCase__(sampling_rate: int, chunk_length_s: float, stream_chunk_s=None, stride_length_s=None, format_for_conversion: str = "f32le"):
    """Stream microphone audio as overlapping numpy chunks suitable for
    streaming speech recognition.

    Fix: all five parameters were originally named identically (duplicate
    argument name -> SyntaxError); the per-item dict writes had been lost to
    throwaway names and the dtypes were mangled (`np.intaa`/`np.floataa`).
    Restored distinct names, the `item[...]` assignments, and
    np.int16 / np.float32.
    """
    # The effective wall-clock length of each yielded chunk.
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""")
    if stride_length_s is None:
        # Default overlap: one sixth of the chunk on each side.
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item['''raw'''] = np.frombuffer(item['''raw'''], dtype=dtype)
        item['''stride'''] = (
            item['''stride'''][0] // size_of_sample,
            item['''stride'''][1] // size_of_sample,
        )
        item['''sampling_rate'''] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def lowerCAmelCase__(iterator, chunk_len, stride, stream=False):
    """Re-chunk a byte iterator into fixed `chunk_len`-sized chunks with
    (stride_left, stride_right) overlap; consecutive chunks share
    `stride_left + stride_right` bytes. When `stream` is True, incomplete
    accumulations are yielded early with "partial": True.

    Fix: all four parameters were originally named identically (duplicate
    argument name -> SyntaxError); restored distinct names.

    Raises ValueError when the combined stride is not strictly smaller than
    the chunk length.
    """
    acc = b''''''
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""")
    # The very first chunk has no left context.
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {'''raw''': acc[:chunk_len], '''stride''': stride}
                if stream:
                    item['''partial'''] = False
                yield item
                _stride_left = stride_left
                # Keep the overlap bytes for the next chunk's left context.
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {'''raw''': acc, '''stride''': (_stride_left, 0)}
        if stream:
            item['''partial'''] = False
        yield item
def lowerCAmelCase__(ffmpeg_command, buflen: int):
    """Spawn the given command and yield its stdout in `buflen`-sized reads.

    Fix: both parameters were originally named identically (duplicate argument
    name -> SyntaxError), conflating the Popen buffer size with the per-read
    length; the OS pipe buffer is a fixed 16MB.

    Raises ValueError when the executable cannot be found.
    """
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''') from error
| 599
| 1
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
# Model tester for OpenAI GPT: builds tiny configs/inputs and exercises the
# base model, LM head, double-heads and sequence-classification variants.
# NOTE(review): machine-mangled — every __init__ parameter is named
# `_lowerCamelCase` (duplicate argument names are a SyntaxError in Python) and
# the body's right-hand sides reference the intended upstream names (parent,
# batch_size, ...), which are undefined here. The five `snake_case__` methods
# also shadow each other at class-creation time. Left byte-identical.
class _snake_case :
    """Mangled stand-in for `OpenAIGPTModelTester` — see NOTE above."""

    def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=99 , _lowerCamelCase=32 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=16 , _lowerCamelCase=2 , _lowerCamelCase=0.02 , _lowerCamelCase=3 , _lowerCamelCase=4 , _lowerCamelCase=None , ):
        UpperCAmelCase__ : List[Any] = parent
        UpperCAmelCase__ : Any = batch_size
        UpperCAmelCase__ : Dict = seq_length
        UpperCAmelCase__ : Tuple = is_training
        UpperCAmelCase__ : Union[str, Any] = use_token_type_ids
        UpperCAmelCase__ : str = use_labels
        UpperCAmelCase__ : Optional[Any] = vocab_size
        UpperCAmelCase__ : int = hidden_size
        UpperCAmelCase__ : List[Any] = num_hidden_layers
        UpperCAmelCase__ : str = num_attention_heads
        UpperCAmelCase__ : List[Any] = intermediate_size
        UpperCAmelCase__ : Optional[int] = hidden_act
        UpperCAmelCase__ : Any = hidden_dropout_prob
        UpperCAmelCase__ : Any = attention_probs_dropout_prob
        UpperCAmelCase__ : Optional[int] = max_position_embeddings
        UpperCAmelCase__ : str = type_vocab_size
        UpperCAmelCase__ : str = type_sequence_label_size
        UpperCAmelCase__ : List[str] = initializer_range
        UpperCAmelCase__ : Tuple = num_labels
        UpperCAmelCase__ : int = num_choices
        UpperCAmelCase__ : Tuple = scope
        # GPT has no dedicated pad token; the last vocab id is used instead.
        UpperCAmelCase__ : str = self.vocab_size - 1

    def snake_case__ ( self):
        """Create a tiny config plus random input/label tensors for the tests."""
        UpperCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        UpperCAmelCase__ : Any = None
        if self.use_token_type_ids:
            UpperCAmelCase__ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        UpperCAmelCase__ : Any = None
        UpperCAmelCase__ : int = None
        UpperCAmelCase__ : List[str] = None
        if self.use_labels:
            UpperCAmelCase__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            UpperCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.num_choices)
        UpperCAmelCase__ : int = OpenAIGPTConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
        UpperCAmelCase__ : Optional[Any] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2)
        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , *_lowerCamelCase):
        """Check the base model's last_hidden_state shape across call variants."""
        UpperCAmelCase__ : int = OpenAIGPTModel(config=_lowerCamelCase)
        model.to(_lowerCamelCase)
        model.eval()
        UpperCAmelCase__ : List[str] = model(_lowerCamelCase , token_type_ids=_lowerCamelCase , head_mask=_lowerCamelCase)
        UpperCAmelCase__ : str = model(_lowerCamelCase , token_type_ids=_lowerCamelCase)
        UpperCAmelCase__ : List[str] = model(_lowerCamelCase)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))

    def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , *_lowerCamelCase):
        """Check the LM head's loss and logits shapes."""
        UpperCAmelCase__ : Optional[Any] = OpenAIGPTLMHeadModel(_lowerCamelCase)
        model.to(_lowerCamelCase)
        model.eval()
        UpperCAmelCase__ : List[str] = model(_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase)
        self.parent.assertEqual(result.loss.shape , ())
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))

    def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , *_lowerCamelCase):
        """Check the double-heads model's loss and logits shapes."""
        UpperCAmelCase__ : List[str] = OpenAIGPTDoubleHeadsModel(_lowerCamelCase)
        model.to(_lowerCamelCase)
        model.eval()
        UpperCAmelCase__ : str = model(_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase)
        self.parent.assertEqual(result.loss.shape , ())
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))

    def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , *_lowerCamelCase):
        """Check the sequence-classification head's logits shape."""
        UpperCAmelCase__ : int = self.num_labels
        UpperCAmelCase__ : str = OpenAIGPTForSequenceClassification(_lowerCamelCase)
        model.to(_lowerCamelCase)
        model.eval()
        UpperCAmelCase__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size)
        UpperCAmelCase__ : List[str] = model(_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))

    def snake_case__ ( self):
        """Split prepare_config_and_inputs into (config, inputs_dict) for common tests."""
        UpperCAmelCase__ : Any = self.prepare_config_and_inputs()
        (
            (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) ,
        ) : Dict = config_and_inputs
        UpperCAmelCase__ : Union[str, Any] = {
            """input_ids""": input_ids,
            """token_type_ids""": token_type_ids,
            """head_mask""": head_mask,
        }
        return config, inputs_dict
@require_torch
# NOTE(review): mixin base-class names reconstructed per the standard HF test layout;
# the file's import block is outside this view — confirm against the actual imports.
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common test-suite wiring for OpenAI-GPT models.

    Restored from an obfuscated version in which every method was named
    `snake_case__` (so each definition silently overrode the previous one)
    and locals such as `inputs_dict` / `model` were read but never bound.
    """

    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                # The double-heads model needs per-choice inputs and multiple-choice labels.
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OpenAIGPTModelLanguageGenerationTest(unittest.TestCase):
    """Slow integration test: greedy generation from the pretrained `openai-gpt` checkpoint."""

    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        # Greedy decoding keeps the expected ids deterministic.
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 407
|
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
# Module-level constant: a mangled-name version (`__A`) would be name-mangled when
# referenced from inside the class body, so it must carry a plain name.
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """XLNet tokenizer tests driven by the shared TokenizerTesterMixin harness.

    Restored from an obfuscated version in which every method shared one name
    and locals (`tokenizer`, `tokens`, `ids`, …) were read but never bound.
    """

    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing.
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """`<s>` maps to id 1 and back."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1006)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "",
                "i",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "se",
                ".",
            ],
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["▁he", "ll", "o"])

    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "se",
                ".",
            ],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # XLNet appends <sep> (id 4) and <cls> (id 3) at the end of each sequence.
        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]
@slow
def snake_case__ ( self):
# fmt: off
UpperCAmelCase__ : List[Any] = {"""input_ids""": [[17, 2_1442, 270, 17, 10, 1_4645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 2_2018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 1_4431, 13, 5500, 11, 1176, 580, 13, 1_6819, 4797, 23, 17, 10, 1_7135, 658, 19, 457, 7932, 13, 184, 19, 3154, 1_7135, 6468, 19, 1404, 1_2269, 19, 4229, 5356, 1_6264, 46, 19, 17, 2_0545, 1_0395, 9, 9, 9, 11, 28, 6421, 9531, 2_0729, 17, 10, 353, 1_7022, 11, 21, 6421, 9531, 1_6949, 17, 10, 1_1509, 753, 11, 33, 95, 2421, 7385, 956, 1_4431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 2_4738, 19, 1_3203, 658, 218, 787, 21, 430, 1_8482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 2_2178, 27, 1064, 22, 956, 13, 1_1101, 1429, 5854, 2_4313, 1_8953, 40, 422, 2_4366, 68, 1758, 37, 1_0483, 1_4257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 1_3894, 3380, 23, 95, 18, 1_7634, 2288, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCamelCase , model_name="""xlnet-base-cased""" , revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" , )
| 407
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Checkpoint-name -> config-URL map. In the obfuscated version both this dict and
# the logger above were assigned to the same name, so the dict clobbered the logger.
MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    """Configuration for MobileNetV1 models.

    Restored from an obfuscated version whose `__init__` had every parameter
    named `__snake_case` (a SyntaxError) and assigned the values to a throwaway
    local instead of `self`.
    """

    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # A non-positive multiplier would produce zero-width layers.
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig(OnnxConfig):
    """ONNX export configuration for MobileNetV1.

    The obfuscated version reused the previous class's name (shadowing it) and
    gave all three properties the same name, so only the last survived.
    """

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Only the batch axis is dynamic.
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 721
|
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    """Minimal Lightning wrapper whose state dict matches the trained QA checkpoint.

    Attribute names (`model`, `qa_outputs`) are fixed by the conversion code
    below, which reads `lightning_model.model` and `lightning_model.qa_outputs`;
    the obfuscated version assigned them to a throwaway local instead of `self`.
    """

    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because Lightning requires a forward; never called here
    def forward(self):
        pass
def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    """Convert a PyTorch Lightning Longformer-QA checkpoint to a LongformerForQuestionAnswering checkpoint.

    The obfuscated version declared all three parameters with the same name
    (a SyntaxError), referenced undefined locals, and did not match the name
    the `__main__` block calls; names are restored from that call site and
    the argparse option names.
    """
    # load base longformer model and wrap it the way the checkpoint expects
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    # CLI entry point; the obfuscated version bound the parser and the parsed
    # args to throwaway names, leaving `parser`/`args` undefined.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
| 166
| 0
|
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    """Builds tiny Open-Llama configs/inputs and runs shape checks for the test class below.

    Restored from an obfuscated version with duplicate parameter names
    (a SyntaxError), every method sharing one name, and `self.*` attributes
    read but never assigned. Method names match the visible call sites
    (`prepare_config_and_inputs`, `create_and_check_model`, ...).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        # cross-attention must work with and without the encoder attention mask
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common test-suite wiring for Open-Llama models.

    Mixin names are grounded in this file's imports; test method names were
    restored from the obfuscated version in which every method was `_snake_case`
    and locals such as `config` / `input_dict` / `model` were never bound.
    """

    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): these two flags were anonymous `False` class attributes in the
    # obfuscated source; names follow the standard HF llama test layout — confirm.
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
| 631
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class _UpperCAmelCase ( BaseImageProcessor ):
    """Image processor that optionally RGB-converts, resizes, rescales and
    normalizes images and packs them into a ``BatchFeature``.

    Defaults (384x384, OpenAI CLIP mean/std) match the BLIP preprocessing.
    """

    model_input_names = ["""pixel_values"""]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 2_55,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 3_84, "width": 3_84}
        size = get_size_dict(size, default_to_square=True)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image`` to the exact ``(height, width)`` given in ``size``."""
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(F"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (e.g. 1/255 to map uint8 into [0, 1])."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize ``image`` channel-wise to ``(image - mean) / std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        do_convert_rgb: bool = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Run the full pipeline on one image or a batch; per-call arguments
        override the defaults stored on the instance."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=True)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
| 631
| 1
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCAmelCase( TokenizerTesterMixin , unittest.TestCase ):
    """Tests for the XLM BPE tokenizer using a tiny handcrafted vocabulary."""

    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False  # XLM has no fast (Rust) tokenizer

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            'w</w>',
            'r</w>',
            't</w>',
            'lo',
            'low',
            'er</w>',
            'low</w>',
            'lowest</w>',
            'newer</w>',
            'wider</w>',
            '<unk>',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w') as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, 'w') as fp:
            fp.write('\n'.join(merges))

    def get_input_output_texts(self, tokenizer):
        """Provide a (raw, expected-detokenized) text pair for the common tests."""
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text

    def test_full_tokenizer(self):
        """'lower' must split into the learned merges 'low' + 'er</w>'."""
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)
        text = 'lower'
        bpe_tokens = ['low', 'er</w>']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ['<unk>']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        """Special-token templates: <s> text </s> and <s> a </s> b </s>."""
        tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')
        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_a = tokenizer.encode('multi-sequence build', add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
| 233
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
class __lowerCAmelCase( PretrainedConfig ):
    """Configuration for BertGeneration encoder/decoder models.

    Defaults mirror BERT-large (1024 hidden, 24 layers, 16 heads).
    """

    model_type = 'bert-generation'

    def __init__(
        self,
        vocab_size=50_358,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        # Special-token ids are consumed by the PretrainedConfig base class.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 233
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    """Holds image-processor kwargs and builds synthetic image batches for the
    ChineseCLIP image-processing tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=4_00,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
        image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {'height': 2_24, 'width': 2_24}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Build a batch of random images as PIL images (default), numpy arrays
        (``numpify``) or torch tensors (``torchify``)."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        image_inputs = []
        if equal_resolution:
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        2_55, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8))
        else:
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(2_55, size=(self.num_channels, width, height), dtype=np.uint8))
        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]
        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """End-to-end tests for ChineseCLIPImageProcessor on PIL, numpy and torch inputs."""

    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_center_crop'))
        self.assertTrue(hasattr(image_processing, 'center_crop'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_convert_rgb'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 2_24, 'width': 2_24})
        self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18})
        # Overrides passed to from_dict must win over the serialized values.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {'shortest_edge': 42})
        self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84})

    def test_batch_feature(self):
        # Covered by the common mixin; nothing ChineseCLIP-specific to add.
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Same pipeline on 4-channel (RGBA) input: RGB conversion must yield 3 channels."""

    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        # After do_convert_rgb the alpha channel is dropped.
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_center_crop'))
        self.assertTrue(hasattr(image_processing, 'center_crop'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_convert_rgb'))

    def test_batch_feature(self):
        # Covered by the common mixin; nothing ChineseCLIP-specific to add.
        pass

    def test_call_pil_four_channels(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
| 622
|
'''simple docstring'''
def solution(length: int = 50) -> int:
    """Count tilings of a 1 x `length` row using unit squares plus tiles of
    length 2, 3 and 4 (Project Euler problem 117).

    ways_number[n] starts at 1 (the all-unit-squares tiling) and accumulates,
    for every tile length and every offset of the first long tile, the number
    of tilings of the remaining prefix.
    """
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]


if __name__ == "__main__":
    print(F"""{solution() = }""")
| 210
| 0
|
"""simple docstring"""
def binary_insertion_sort(collection: list) -> list:
    """Sort ``collection`` in place with binary insertion sort and return it.

    For each element, binary-search the sorted prefix for the insertion point
    (O(log n) comparisons), then shift the larger elements one slot right.
    """
    n = len(collection)
    for i in range(1, n):
        value = collection[i]
        low = 0
        high = i - 1
        # Binary search for the leftmost position after any equal elements,
        # which keeps the sort stable.
        while low <= high:
            mid = (low + high) // 2
            if value < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift elements right to open the slot at `low`.
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = value
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
| 614
|
"""simple docstring"""
def binary_insertion_sort(collection: list) -> list:
    """Sort ``collection`` in place with binary insertion sort and return it.

    For each element, binary-search the sorted prefix for the insertion point
    (O(log n) comparisons), then shift the larger elements one slot right.
    """
    n = len(collection)
    for i in range(1, n):
        value = collection[i]
        low = 0
        high = i - 1
        # Binary search for the leftmost position after any equal elements,
        # which keeps the sort stable.
        while low <= high:
            mid = (low + high) // 2
            if value < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift elements right to open the slot at `low`.
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = value
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
| 614
| 1
|
"""simple docstring"""
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
# Metric metadata strings: the decorator below and _info() reference them by
# these names.
_CITATION = "\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n"
_DESCRIPTION = "\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.\n"
_KWARGS_DESCRIPTION = r"\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting \"1/2\" to \"\\frac{1}{2}\")\n\nExamples:\n >>> metric = datasets.load_metric(\"competition_math\")\n >>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])\n >>> print(results)\n {'accuracy': 1.0}\n"


@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __UpperCAmelCase ( datasets.Metric ):
    """Accuracy on the MATH dataset, canonicalizing LaTeX before comparing."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        """Fraction of predictions equivalent to their reference after
        canonicalization (e.g. "1/2" vs "\\frac{1}{2}")."""
        n_correct = 0.0
        for pred, ref in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(pred, ref) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
| 505
|
"""simple docstring"""
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


# Import the real Shap-E pipelines only when both torch and transformers are
# installed; otherwise fall back to the dummy object, which raises a helpful
# error message when the pipeline is actually used.
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
    from .camera import create_pan_cameras
    from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
    from .renderer import (
        BoundingBoxVolume,
        ImportanceRaySampler,
        MLPNeRFModelOutput,
        MLPNeRSTFModel,
        ShapEParamsProjModel,
        ShapERenderer,
        StratifiedRaySampler,
        VoidNeRFModel,
    )
| 505
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class __magic_name__ (PretrainedConfig ):
    """RoFormer model configuration (BERT-style encoder with rotary position
    embeddings)."""

    model_type = '''roformer'''

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.0_2,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        # The embedding size defaults to the hidden size when not given.
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Whether to apply the rotary embedding to the value projection as well.
        self.rotary_value = rotary_value
        self.use_cache = use_cache
# Renamed from a second `__magic_name__` definition, which shadowed the config
# class defined just above it.
class RoFormerOnnxConfig(OnnxConfig):
    """ONNX export configuration: declares the dynamic axes of each model input."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis between batch
        # and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 226
|
import argparse
import json
from tqdm import tqdm
def main():
    """Split the raw DPR biencoder dev file into an evaluation-set file
    (one question per line) and a gold-data file (tab-joined positive-context
    titles per line)."""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path", type=str, default="biencoder-nq-dev.json", help="Path to raw DPR training data", )
    parser.add_argument(
        "--evaluation_set", type=str, help="where to store parsed evaluation_set file", )
    parser.add_argument(
        "--gold_data_path", type=str, help="where to store parsed gold_data_path file", )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w") as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
| 226
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import structure: maps submodule name -> public names it provides.
_import_structure = {
    'configuration_encodec': [
        'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'EncodecConfig',
    ],
    'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}

# The modeling module requires torch; register it only when torch is present.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_encodec'] = [
        'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
        'EncodecModel',
        'EncodecPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports
    # submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 640
|
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a ( _lowerCamelCase , unittest.TestCase ):
    """CLIP tokenizer tests comparing the slow and fast implementations.

    NOTE(review): this block appears machine-renamed — every local result is
    assigned to the single name ``snake_case_`` while later statements read the
    original variable names (``spaces_unicodes``, ``line_break_unicodes``,
    ``text``, ``kwargs`` ...), which are undefined here.  The comments below
    describe the apparent intent; restore from the upstream CLIP tokenizer
    test suite before executing.
    """

    # Tokenizer classes exercised by the shared tokenizer-test mixin.
    snake_case_ = CLIPTokenizer
    snake_case_ = CLIPTokenizerFast
    snake_case_ = True
    snake_case_ = {}
    snake_case_ = False

    def A_ ( self : List[Any] ):
        # Write a tiny BPE vocab/merges pair into ``self.tmpdirname`` for the tests.
        super().setUp()
        # fmt: off
        snake_case_ = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
        # fmt: on
        snake_case_ = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
        snake_case_ = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''']
        snake_case_ = {'''unk_token''': '''<unk>'''}
        snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(lowercase_ ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(lowercase_ ) )

    def A_ ( self : Tuple , **lowercase_ : Tuple ):
        # Factory for the slow tokenizer seeded with the test special tokens.
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowercase_ )

    def A_ ( self : List[str] , **lowercase_ : Dict ):
        # Factory for the fast (Rust-backed) tokenizer.
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_ )

    def A_ ( self : Optional[int] , lowercase_ : Optional[int] ):
        # Input/expected-output pair used by the common tokenizer tests.
        snake_case_ = '''lower newer'''
        snake_case_ = '''lower newer'''
        return input_text, output_text

    def A_ ( self : Optional[int] ):
        # End-to-end BPE tokenization against the toy vocab written in setUp.
        snake_case_ = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        snake_case_ = '''lower newer'''
        snake_case_ = ['''lo''', '''w''', '''er</w>''', '''n''', '''e''', '''w''', '''er</w>''']
        snake_case_ = tokenizer.tokenize(lowercase_ )
        self.assertListEqual(lowercase_ , lowercase_ )
        snake_case_ = tokens + [tokenizer.unk_token]
        snake_case_ = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ) , lowercase_ )

    @require_ftfy
    def A_ ( self : Optional[Any] ):
        # Slow (ftfy) and fast tokenizers should agree on tricky unicode input.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                snake_case_ = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
                snake_case_ = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
                snake_case_ = '''A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'''
                snake_case_ = tokenizer_s.tokenize(lowercase_ )
                snake_case_ = tokenizer_r.tokenize(lowercase_ )
                self.assertListEqual(lowercase_ , lowercase_ )
                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                snake_case_ = '''xa\u0303y''' + ''' ''' + '''x\xe3y'''
                snake_case_ = tokenizer_s.tokenize(lowercase_ )
                snake_case_ = tokenizer_r.tokenize(lowercase_ )
                self.assertListEqual(lowercase_ , lowercase_ )
                # Test that the tokenization is identical on unicode of space type
                snake_case_ = [
                    '''\u0009''', # (horizontal tab, '\t')
                    '''\u000B''', # (vertical tab)
                    '''\u000C''', # (form feed)
                    '''\u0020''', # (space, ' ')
                    '''\u200E''', # (left-to-right mark):w
                    '''\u200F''', # (right-to-left mark)
                ]
                # NOTE(review): ``spaces_unicodes`` is undefined here — it was the list above.
                for unicode_seq in spaces_unicodes:
                    snake_case_ = tokenizer_s.tokenize(lowercase_ )
                    snake_case_ = tokenizer_r.tokenize(lowercase_ )
                    self.assertListEqual(lowercase_ , lowercase_ )
                # Test that the tokenization is identical on unicode of line break type
                snake_case_ = [
                    '''\u000A''', # (line feed, '\n')
                    '''\r\n''', # (carriage return and line feed, '\r\n')
                    '''\u000D''', # (carriage return, '\r')
                    '''\r''', # (carriage return, '\r')
                    '''\u000D''', # (carriage return, '\r')
                    '''\u2028''', # (line separator)
                    '''\u2029''', # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                # NOTE(review): ``line_break_unicodes`` is undefined here — it was the list above.
                for unicode_seq in line_break_unicodes:
                    snake_case_ = tokenizer_s.tokenize(lowercase_ )
                    snake_case_ = tokenizer_r.tokenize(lowercase_ )
                    self.assertListEqual(lowercase_ , lowercase_ )

    def A_ ( self : List[str] ):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                snake_case_ = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
                snake_case_ = F"{text_of_1_token} {text_of_1_token}"
                snake_case_ = self.rust_tokenizer_class.from_pretrained(
                    lowercase_ , use_fast=lowercase_ , )
                snake_case_ = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase_ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowercase_ ) + 1, len(lowercase_ ) + 1 + len(lowercase_ )) , )
                # NOTE(review): ``text`` is undefined here — presumably the two-token string above.
                snake_case_ = F" {text}"
                snake_case_ = self.rust_tokenizer_class.from_pretrained(
                    lowercase_ , use_fast=lowercase_ , )
                snake_case_ = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowercase_ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowercase_ ) + 1, 1 + len(lowercase_ ) + 1 + len(lowercase_ )) , )

    def A_ ( self : Optional[Any] ):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error in raised when the user try to load a previous version of the tokenizer.
        with self.assertRaises(lowercase_ ) as context:
            self.rust_tokenizer_class.from_pretrained('''robot-test/old-clip-tokenizer''' )
        self.assertTrue(
            context.exception.args[0].startswith(
                '''The `backend_tokenizer` provided does not match the expected format.''' ) )

    @require_ftfy
    def A_ ( self : List[str] ):
        # Delegates to the shared slow-vs-fast equivalence test (needs ftfy).
        super().test_tokenization_python_rust_equals()

    def A_ ( self : List[Any] ):
        # CLIP always lower cases letters
        pass
| 640
| 1
|
'''simple docstring'''
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class lowercase :
    """Builder of BertGeneration configs and dummy inputs for the tests below.

    NOTE(review): this block looks machine-renamed — ``__init__`` declares the
    same parameter name ``__lowerCAmelCase`` repeatedly (a SyntaxError in real
    Python), results are assigned to a throwaway ``lowercase_`` local instead
    of ``self`` attributes, and the methods reference ``A__`` which is
    undefined in this file.  Restore from the upstream transformers test
    suite before executing.
    """

    def __init__( self : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any=13 , __lowerCAmelCase : str=7 , __lowerCAmelCase : Dict=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Tuple=99 , __lowerCAmelCase : Any=32 , __lowerCAmelCase : Union[str, Any]=5 , __lowerCAmelCase : List[str]=4 , __lowerCAmelCase : Union[str, Any]=37 , __lowerCAmelCase : Optional[int]="gelu" , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Union[str, Any]=50 , __lowerCAmelCase : Union[str, Any]=0.02 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : List[str]=None , ) -> Optional[int]:
        # NOTE(review): each assignment below presumably targeted ``self.<name>``.
        lowercase_ = parent
        lowercase_ = batch_size
        lowercase_ = seq_length
        lowercase_ = is_training
        lowercase_ = use_input_mask
        lowercase_ = vocab_size
        lowercase_ = hidden_size
        lowercase_ = num_hidden_layers
        lowercase_ = num_attention_heads
        lowercase_ = intermediate_size
        lowercase_ = hidden_act
        lowercase_ = hidden_dropout_prob
        lowercase_ = attention_probs_dropout_prob
        lowercase_ = max_position_embeddings
        lowercase_ = initializer_range
        lowercase_ = use_labels
        lowercase_ = scope

    def __UpperCAmelCase ( self : List[Any]) -> Dict:
        # Builds (config, input_ids, input_mask, token_labels) dummy inputs.
        lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        lowercase_ = None
        if self.use_input_mask:
            lowercase_ = random_attention_mask([self.batch_size, self.seq_length])
        if self.use_labels:
            lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        lowercase_ = self.get_config()
        return config, input_ids, input_mask, token_labels

    def __UpperCAmelCase ( self : Any) -> List[Any]:
        # Config assembled from the tester's hyper-parameters.
        return BertGenerationConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=A__ , initializer_range=self.initializer_range , )

    def __UpperCAmelCase ( self : Any) -> int:
        # Extends the basic inputs with encoder states for decoder tests.
        (
            lowercase_
        ) = self.prepare_config_and_inputs()
        lowercase_ = True
        lowercase_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        lowercase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def __UpperCAmelCase ( self : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , **__lowerCAmelCase : List[Any] , ) -> Dict:
        # Forward pass through the encoder; checks output shape only.
        lowercase_ = BertGenerationEncoder(config=A__)
        model.to(A__)
        model.eval()
        lowercase_ = model(A__ , attention_mask=A__)
        lowercase_ = model(A__)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))

    def __UpperCAmelCase ( self : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , **__lowerCAmelCase : Tuple , ) -> Union[str, Any]:
        # Encoder used as a cross-attending decoder; shape check only.
        lowercase_ = True
        lowercase_ = BertGenerationEncoder(config=A__)
        model.to(A__)
        model.eval()
        lowercase_ = model(
            A__ , attention_mask=A__ , encoder_hidden_states=A__ , encoder_attention_mask=A__ , )
        lowercase_ = model(
            A__ , attention_mask=A__ , encoder_hidden_states=A__ , )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))

    def __UpperCAmelCase ( self : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , **__lowerCAmelCase : Union[str, Any] , ) -> Any:
        # Verifies past_key_values caching matches a full-sequence forward pass.
        lowercase_ = True
        lowercase_ = True
        lowercase_ = BertGenerationDecoder(config=A__).to(A__).eval()
        # first forward pass
        lowercase_ = model(
            A__ , attention_mask=A__ , encoder_hidden_states=A__ , encoder_attention_mask=A__ , use_cache=A__ , )
        lowercase_ = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        lowercase_ = ids_tensor((self.batch_size, 3) , config.vocab_size)
        lowercase_ = ids_tensor((self.batch_size, 3) , vocab_size=2)
        # append to next input_ids and
        lowercase_ = torch.cat([input_ids, next_tokens] , dim=-1)
        lowercase_ = torch.cat([input_mask, next_mask] , dim=-1)
        lowercase_ = model(
            A__ , attention_mask=A__ , encoder_hidden_states=A__ , encoder_attention_mask=A__ , output_hidden_states=A__ , )["""hidden_states"""][0]
        lowercase_ = model(
            A__ , attention_mask=A__ , encoder_hidden_states=A__ , encoder_attention_mask=A__ , past_key_values=A__ , output_hidden_states=A__ , )["""hidden_states"""][0]
        # select random slice
        lowercase_ = ids_tensor((1,) , output_from_past.shape[-1]).item()
        lowercase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
        lowercase_ = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(A__ , A__ , atol=1e-3))

    def __UpperCAmelCase ( self : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , *__lowerCAmelCase : int , ) -> List[str]:
        # Causal-LM head: logits shape check against (batch, seq, vocab).
        lowercase_ = BertGenerationDecoder(A__)
        model.to(A__)
        model.eval()
        lowercase_ = model(A__ , attention_mask=A__ , labels=A__)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))

    def __UpperCAmelCase ( self : int) -> Dict:
        # Packs inputs in the dict format expected by the common test mixin.
        lowercase_ = self.prepare_config_and_inputs()
        lowercase_ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class lowercase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
    """Common model/pipeline test-suite wiring for BertGeneration.

    NOTE(review): machine-mangled — the mixin base classes were replaced by
    the undefined name ``__lowerCamelCase``, results are assigned to a
    throwaway ``lowercase_``, and ``A__`` is unbound in this file.  Compare
    with the upstream transformers tests before relying on this code.
    """

    lowerCamelCase_ =(BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    lowerCamelCase_ =(BertGenerationDecoder,) if is_torch_available() else ()
    lowerCamelCase_ =(
        {'''feature-extraction''': BertGenerationEncoder, '''text-generation''': BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def __UpperCAmelCase ( self : Optional[int]) -> Optional[int]:
        # setUp: builds the tester helper and a config tester.
        lowercase_ = BertGenerationEncoderTester(self)
        lowercase_ = ConfigTester(self , config_class=A__ , hidden_size=37)

    def __UpperCAmelCase ( self : List[str]) -> List[str]:
        self.config_tester.run_common_tests()

    def __UpperCAmelCase ( self : Union[str, Any]) -> Optional[Any]:
        lowercase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A__)

    def __UpperCAmelCase ( self : int) -> str:
        # Same model check but with ``model_type`` forced to "bert".
        lowercase_ = self.model_tester.prepare_config_and_inputs()
        lowercase_ = """bert"""
        self.model_tester.create_and_check_model(A__ , A__ , A__ , A__)

    def __UpperCAmelCase ( self : Optional[Any]) -> Optional[int]:
        lowercase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*A__)

    def __UpperCAmelCase ( self : List[Any]) -> Dict:
        lowercase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*A__)

    def __UpperCAmelCase ( self : List[Any]) -> Optional[int]:
        # This regression test was failing with PyTorch < 1.3
        (
            lowercase_
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        lowercase_ = None
        self.model_tester.create_and_check_model_as_decoder(
            A__ , A__ , A__ , A__ , A__ , A__ , )

    def __UpperCAmelCase ( self : str) -> int:
        lowercase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*A__)

    @slow
    def __UpperCAmelCase ( self : List[str]) -> Dict:
        # Smoke-test loading the pretrained checkpoint from the Hub.
        lowercase_ = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(A__)
@require_torch
class lowercase ( unittest.TestCase ):
    """Integration check: encoder hidden states on a fixed input (slow test).

    NOTE(review): ``A__`` is undefined in this file (mangled) — it stood for
    the model input / expected values in the upstream test.
    """

    @slow
    def __UpperCAmelCase ( self : List[Any]) -> int:
        lowercase_ = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        lowercase_ = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]])
        with torch.no_grad():
            lowercase_ = model(A__)[0]
        # Expected shape: (batch=1, seq_len=8, hidden=1024).
        lowercase_ = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape , A__)
        lowercase_ = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]])
        self.assertTrue(torch.allclose(output[:, :3, :3] , A__ , atol=1e-4))
@require_torch
class lowercase ( unittest.TestCase ):
    """Integration check: decoder logits on a fixed input (slow test).

    NOTE(review): ``A__`` is undefined in this file (mangled) — it stood for
    the model input / expected values in the upstream test.
    """

    @slow
    def __UpperCAmelCase ( self : Tuple) -> str:
        lowercase_ = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        lowercase_ = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]])
        with torch.no_grad():
            lowercase_ = model(A__)[0]
        # Expected shape: (batch=1, seq_len=8, vocab=50358).
        lowercase_ = torch.Size([1, 8, 5_0358])
        self.assertEqual(output.shape , A__)
        lowercase_ = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]])
        self.assertTrue(torch.allclose(output[:, :3, :3] , A__ , atol=1e-4))
| 720
|
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
# Vertex type parameter for the generic graph below.
T = TypeVar("T")
lowerCAmelCase_ = T  # preserve the original module-level alias for any other readers


class lowercase ( Generic[T] ):
    """Directed or undirected graph stored as an adjacency list (dict of lists).

    Fixes applied: the mangled original assigned the adjacency dict, the
    ``directed`` flag and every new vertex list to a throwaway local
    (``lowercase_``) instead of ``self.adj_list`` / ``self.directed``, used the
    undefined names ``T`` and ``GraphAdjacencyList``, and gave both parameters
    of the add-edge method the same name.
    """

    def __init__( self , directed : bool = True ) -> None:
        # dictionary of lists: vertex -> list of adjacent vertices
        self.adj_list = {}
        self.directed = directed

    def __UpperCAmelCase ( self , source_vertex : T , destination_vertex : T ) -> "lowercase[T]":
        """Connect ``source_vertex`` to ``destination_vertex`` and return ``self``.

        For undirected graphs the edge is mirrored in both adjacency lists;
        vertices are created on first use with the new neighbour as their
        first entry (or an empty list for a directed sink).
        """
        if not self.directed:  # For undirected graphs
            # If both endpoints already exist, just cross-append.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # Only the source exists: append, then create the destination.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # Only the destination exists: append, then create the source.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # Neither exists: create both with each other as first neighbour.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # Both endpoints exist: append the destination to the source list.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # Only the source exists: append, create destination with no edges.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # Only the destination exists: create the source pointing at it.
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # Neither exists: create both; destination starts with no edges.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self

    def __repr__( self ) -> str:
        return pformat(self.adj_list)
| 461
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_lowerCAmelCase :Tuple = logging.get_logger(__name__)
class UpperCAmelCase ( lowerCAmelCase_ ):
    """Deprecated feature-extractor shim that forwards to the image processor.

    Fixes applied: the mangled original declared ``*lowercase__, **lowercase__``
    (duplicate parameter name — a SyntaxError), passed the undefined name
    ``A__`` as the warning category (should be ``FutureWarning``), and
    annotated ``__init__`` as returning ``str``.
    """

    def __init__( self , *args , **kwargs ) -> None:
        # Emit the standard deprecation notice, then delegate construction
        # unchanged to the (project-provided) base class.
        warnings.warn(
            'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use SegformerImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 251
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ :Optional[int] = logging.get_logger(__name__)
UpperCAmelCase__ :Union[str, Any] = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    """WavLM model configuration container.

    NOTE(review): machine-mangled — ``__init__`` declares the parameter name
    ``A__`` many times over (a SyntaxError in real Python) and stores values
    into the repeatedly rebound local ``__lowerCamelCase`` instead of ``self``
    attributes; the keyword names the body reads (``hidden_size`` etc.) are
    therefore unbound.  Restore parameter names from the upstream
    ``WavLMConfig`` before use.
    """

    # Presumably the ``model_type`` key used for AutoConfig dispatch — confirm upstream.
    snake_case__ : int = 'wavlm'

    def __init__( self : List[str] , A__ : int=32 , A__ : str=768 , A__ : List[Any]=12 , A__ : Any=12 , A__ : Any=3072 , A__ : Union[str, Any]="gelu" , A__ : Union[str, Any]=0.1 , A__ : Optional[Any]=0.1 , A__ : str=0.1 , A__ : List[str]=0.0 , A__ : List[str]=0.1 , A__ : Optional[Any]=0.1 , A__ : List[str]=0.02 , A__ : str=1e-5 , A__ : List[Any]="group" , A__ : Optional[int]="gelu" , A__ : int=(512, 512, 512, 512, 512, 512, 512) , A__ : Optional[Any]=(5, 2, 2, 2, 2, 2, 2) , A__ : str=(10, 3, 3, 3, 3, 2, 2) , A__ : List[Any]=False , A__ : str=128 , A__ : Union[str, Any]=16 , A__ : Optional[Any]=320 , A__ : List[Any]=800 , A__ : Union[str, Any]=False , A__ : Optional[int]=True , A__ : Union[str, Any]=0.05 , A__ : Any=10 , A__ : Any=2 , A__ : List[Any]=0.0 , A__ : List[Any]=10 , A__ : int=320 , A__ : Tuple=2 , A__ : Optional[int]=0.1 , A__ : Tuple=100 , A__ : Tuple=256 , A__ : int=256 , A__ : Dict=0.1 , A__ : Union[str, Any]="mean" , A__ : int=False , A__ : Optional[int]=False , A__ : List[str]=256 , A__ : Optional[Any]=(512, 512, 512, 512, 1500) , A__ : List[str]=(5, 3, 3, 1, 1) , A__ : Any=(1, 2, 3, 1, 1) , A__ : List[Any]=512 , A__ : List[str]=80 , A__ : List[str]=0 , A__ : List[Any]=1 , A__ : Optional[int]=2 , A__ : List[Any]=False , A__ : int=3 , A__ : Union[str, Any]=2 , A__ : int=3 , A__ : Optional[Any]=None , **A__ : str , ):
        """Store all hyper-parameters; see upstream WavLMConfig for per-field docs."""
        super().__init__(**A__ , pad_token_id=A__ , bos_token_id=A__ , eos_token_id=A__ )
        # Transformer encoder / feature-extractor hyper-parameters.
        __lowerCamelCase : Optional[Any] = hidden_size
        __lowerCamelCase : List[Any] = feat_extract_norm
        __lowerCamelCase : Union[str, Any] = feat_extract_activation
        __lowerCamelCase : Union[str, Any] = list(A__ )
        __lowerCamelCase : Any = list(A__ )
        __lowerCamelCase : Dict = list(A__ )
        __lowerCamelCase : int = conv_bias
        __lowerCamelCase : Optional[Any] = num_buckets
        __lowerCamelCase : Union[str, Any] = max_bucket_distance
        __lowerCamelCase : Optional[Any] = num_conv_pos_embeddings
        __lowerCamelCase : Dict = num_conv_pos_embedding_groups
        __lowerCamelCase : List[str] = len(self.conv_dim )
        __lowerCamelCase : Any = num_hidden_layers
        __lowerCamelCase : Dict = intermediate_size
        __lowerCamelCase : int = hidden_act
        __lowerCamelCase : List[Any] = num_attention_heads
        __lowerCamelCase : Tuple = hidden_dropout
        __lowerCamelCase : Any = attention_dropout
        __lowerCamelCase : Optional[int] = activation_dropout
        __lowerCamelCase : Optional[Any] = feat_proj_dropout
        __lowerCamelCase : List[Any] = final_dropout
        __lowerCamelCase : Tuple = layerdrop
        __lowerCamelCase : Optional[int] = layer_norm_eps
        __lowerCamelCase : Tuple = initializer_range
        __lowerCamelCase : Optional[Any] = num_ctc_classes
        __lowerCamelCase : Tuple = vocab_size
        __lowerCamelCase : int = do_stable_layer_norm
        __lowerCamelCase : Dict = use_weighted_layer_sum
        __lowerCamelCase : List[str] = classifier_proj_size
        # The three conv descriptors must have one entry per feature-extractor layer.
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
                """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
                f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        __lowerCamelCase : str = apply_spec_augment
        __lowerCamelCase : Tuple = mask_time_prob
        __lowerCamelCase : Any = mask_time_length
        __lowerCamelCase : Optional[int] = mask_time_min_masks
        __lowerCamelCase : int = mask_feature_prob
        __lowerCamelCase : Tuple = mask_feature_length
        # parameters for pretraining with codevector quantized representations
        __lowerCamelCase : Optional[Any] = num_codevectors_per_group
        __lowerCamelCase : int = num_codevector_groups
        __lowerCamelCase : Optional[int] = contrastive_logits_temperature
        __lowerCamelCase : Optional[int] = num_negatives
        __lowerCamelCase : str = codevector_dim
        __lowerCamelCase : Dict = proj_codevector_dim
        __lowerCamelCase : List[str] = diversity_loss_weight
        # ctc loss
        __lowerCamelCase : Union[str, Any] = ctc_loss_reduction
        __lowerCamelCase : Tuple = ctc_zero_infinity
        # adapter
        __lowerCamelCase : List[str] = add_adapter
        __lowerCamelCase : str = adapter_kernel_size
        __lowerCamelCase : List[str] = adapter_stride
        __lowerCamelCase : Tuple = num_adapter_layers
        __lowerCamelCase : Union[str, Any] = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        __lowerCamelCase : Any = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        __lowerCamelCase : List[str] = list(A__ )
        __lowerCamelCase : str = list(A__ )
        __lowerCamelCase : Dict = list(A__ )
        __lowerCamelCase : Optional[int] = xvector_output_dim

    @property
    def a_ ( self : Union[str, Any] ):
        """Total downsampling stride of the conv feature extractor (product of ``conv_stride``)."""
        return functools.reduce(operator.mul , self.conv_stride , 1 )
| 150
| 0
|
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def __UpperCamelCase ( model_a , model_b , did_step , iteration ):
    """Assert gradient (de)synchronization between two parameter-matched models.

    Fix applied: the mangled original declared all four parameters as ``A``
    (a duplicate-argument SyntaxError) while the body already read
    ``model_a`` / ``model_b`` / ``did_step`` / ``iteration``.

    Args:
        model_a, model_b: modules whose ``parameters()`` align pairwise.
        did_step: when True, grads must match; when False, they must differ.
        iteration: loop counter, used only in the assertion messages.
    """
    for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
        # Frozen parameters carry no gradient to compare.
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad , grad_param.grad ) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad , grad_param.grad ) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def __UpperCamelCase ( model , inputs , target , accelerator , do_backward=True ):
    """Run one training step and backpropagate the MSE loss.

    Fix applied: the mangled original declared five parameters all named ``A``
    (a duplicate-argument SyntaxError) and referenced the undefined names
    ``lowerCAmelCase__`` / ``loss``; reconstructed from the body's intent.

    Args:
        model: module to step (put into train mode).
        inputs: batch fed to ``model``.
        target: regression target for the MSE loss.
        accelerator: object providing ``gradient_accumulation_steps`` and
            ``backward`` — presumably an ``accelerate.Accelerator``; confirm.
        do_backward: when True, let the accelerator drive the backward pass;
            when False, scale the loss manually and call ``loss.backward()``.
    """
    model.train()
    output = model(inputs )
    loss = F.mse_loss(output , target.to(output.device ) )
    if not do_backward:
        # Manual accumulation path: scale the loss ourselves.
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss )
def __UpperCamelCase ( accelerator , sched=False ):
    """Build a (model, DDP model, dataloader[, optimizers/schedulers]) fixture.

    Fix applied: the mangled original declared both parameters as ``A``
    (a duplicate-argument SyntaxError) and assigned every result to one
    throwaway local; reconstructed with meaningful names.

    Returns:
        ``(model, ddp_model, dataloader)`` when ``sched`` is falsy, else
        ``(model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)``.
    """
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )  # Make a copy of `model` before prepare()
    dset = RegressionDataset(length=80 )
    dataloader = DataLoader(dset , batch_size=16 )
    model.to(accelerator.device )
    if sched:
        opt = AdamW(params=model.parameters() , lr=1e-3 )
        ddp_opt = AdamW(params=ddp_model.parameters() , lr=1e-3 )
        sched = LambdaLR(opt , lr_lambda=lambda epoch : epoch**0.65 )
        ddp_sched = LambdaLR(ddp_opt , lr_lambda=lambda epoch : epoch**0.65 )
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model , ddp_opt , ddp_sched , dataloader )
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    ddp_model, dataloader = accelerator.prepare(ddp_model , dataloader )
    return model, ddp_model, dataloader
def __UpperCamelCase ( accelerator ):
    """Verify that ``no_sync`` is a no-op on a single CPU/GPU.

    Fix applied: the mangled original referenced the undefined name
    ``lowerCAmelCase__`` for every argument and reused one local for all
    assignments; reconstructed.  Relies on the sibling helpers
    ``get_training_setup`` / ``step_model`` / ``check_model_parameters``
    being in scope.
    """
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator )
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target) )
        input, target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model ):
                step_model(ddp_model , ddp_input , ddp_target , accelerator )
        else:
            # Sync grads
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model , ddp_model , True , iteration )
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad , ddp_param.grad ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
def __UpperCamelCase ( accelerator ):
    """Verify ``no_sync`` gradient behaviour on a distributed setup.

    Fix applied: the mangled original referenced the undefined name
    ``lowerCAmelCase__`` for every argument and reused one local for all
    assignments; reconstructed.  Relies on the sibling helpers
    ``get_training_setup`` / ``step_model`` being in scope.
    """
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator )
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target) )
        input, target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model ):
                step_model(ddp_model , ddp_input , ddp_target , accelerator )
        else:
            # Sync grads
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
def __UpperCamelCase ( split_batches=False , dispatch_batches=False ):
    """Check gradient accumulation via ``Accelerator.accumulate``.

    Fix applied: the mangled original declared both keyword parameters as
    ``A`` (a duplicate-argument SyntaxError) and referenced undefined names;
    reconstructed.  Relies on the sibling helpers ``get_training_setup`` /
    ``step_model`` being in scope.
    """
    accelerator = Accelerator(
        split_batches=split_batches , dispatch_batches=dispatch_batches , gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator )
    for iteration, batch in enumerate(dataloader ):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target) )
        input, target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator , False )
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model ):
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader ) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    """Check gradient accumulation together with an optimizer and LR scheduler.

    The DDP optimizer/scheduler are stepped under ``accelerator.accumulate``,
    so the wrapper must skip the extra steps and keep the learning rates of
    the reference and DDP optimizers identical on every iteration.
    """
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                # each process advances the scheduler, mirroring what prepare() wraps
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    """Check ``GradientState`` tracking when iteration of a dataloader breaks early.

    The accelerator's gradient state must always point at the *innermost*
    active dataloader and flag ``end_of_dataloader`` only on the final batch.
    """
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)

    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                # nested iteration: the active dataloader must switch to the inner one
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    """Run the gradient-sync test suite appropriate for the current distributed setup."""
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    # plain (False, False) case already covered above
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs): entry point invoked once per TPU core; the core
    # index is unused because `main()` reads its rank from the Accelerator state.
    main()
# Script entry point (also reachable via distributed launchers / xla_spawn).
if __name__ == "__main__":
    main()
| 706
|
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Rename a flax parameter key and reorient its tensor for PyTorch.

    - A 3D ``kernel`` is an expert (MoE) layer: rename the leaf to ``weight``
      and permute the two feature axes so each expert matches ``nn.Linear``.
    - A 2D ``kernel`` is a plain linear layer: rename and transpose.
    - ``scale`` / ``embedding`` leaves only need the rename.

    Returns the (possibly renamed) key tuple and the (possibly reoriented) tensor.
    """
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    """Split a flattened T5X checkpoint key into (layer name, spec path, value).

    ``layer`` keys look like ``"<real layer>/metadata/..."``,
    ``"<real layer>/kvstore/..."`` or ``"<real layer>/<leaf>"``.  The value is
    taken from ``checkpoint_info`` except for two tensorstore fields that are
    rewritten: ``kvstore/path`` is made absolute under
    ``switch_checkpoint_path`` and ``kvstore/driver`` is forced to ``"file"``.
    """
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]  # drop trailing "/"
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]  # drop trailing "/"
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    """Rename the keys of one shard to the Transformers convention and save it.

    ``rename_keys`` applies the model-specific renaming; the remaining "/"
    separators are then converted to "." before the shard is written with
    ``torch.save``.
    """
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    """Convert a T5X Switch Transformers checkpoint into sharded PyTorch weights.

    Tensors are streamed one at a time through tensorstore so the whole
    checkpoint never has to fit in memory.  Shards are capped at
    ``max_shard_size`` bytes; if more than one shard is produced, the files
    are renamed to the final ``-NNNNN-of-MMMMM`` scheme and a weight-map
    index file is written.  Returns ``(metadata, index)``.
    """
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    # Group the per-layer tensorstore spec entries (metadata, kvstore, ...) by real layer name.
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            ".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin"
        )
        # the shards were written with a "-of-???" placeholder; rename to the final count
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
    parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    # argparse stores "--switch_t5x_checkpoint_path" as args.switch_t5x_checkpoint_path;
    # the previous code read the non-existent attribute "switch_tax_checkpoint_path".
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def sanity_check():
    """Quick end-to-end check that a converted checkpoint can generate text."""
    # imported lazily so the conversion path does not require the model classes
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 469
| 0
|
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
# (referenced as a global by oe_process below)
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """Worker for one element of the odd-even transposition sort.

    position: index of this worker in the array.
    value: the element this worker currently holds.
    l_send / r_send: pipes used to send this value to the left/right
        neighbour (``None`` at the array boundaries).
    lr_cv / rr_cv: pipes on which the left/right neighbour's value arrives.
    result_pipe: pipe used to report the final value back to the caller.
    """
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    """Sort ``arr`` in place with odd-even transposition sort, one process per element.

    Each worker exchanges its value with a neighbour over pipes on alternating
    phases; after the fixed number of rounds in ``oe_process`` the values are
    sorted and collected back through ``result_pipe``.
    """
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())

    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    """Demo: sort a reversed list of 10 numbers and print before/after."""
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
# Run the demo only when executed as a script (workers re-import this module).
if __name__ == "__main__":
    main()
| 210
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class UpperCamelCase__(unittest.TestCase):
    """Unit tests for the summarization preprocessing helpers in ``utils_summarization``."""

    def setUp(self):
        # Block size shared by the truncate/pad tests below.
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is already the block size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is longer than the block size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """A story without "@highlight" markers yields no summary lines."""
        raw_story = '''It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this.'''
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story yields empty story and summary lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        """Sentences without a final period are completed with one."""
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)
        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)
        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
| 210
| 1
|
from __future__ import annotations
# sample input used by the __main__ demo/benchmark below, plus the expected
# next-greater-element answer for eyeballing correctness
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """For each element, return the next strictly greater element to its
    right, or -1 if none exists.  O(n^2) index-based reference implementation.
    """
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_item: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_item = arr[j]
                break
        result.append(next_item)
    return result
def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Like :func:`next_greatest_element_slow` but iterating with
    ``enumerate`` and slicing instead of indices (still O(n^2)).
    """
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result
def next_greatest_element(arr: list[float]) -> list[float]:
    """O(n) monotonic-stack next-greater-element.

    Scanning right-to-left, the stack holds candidates in decreasing order;
    elements <= the current value can never be anyone's "next greater" and
    are popped.
    """
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()

    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    # timeit runs the statements in a fresh namespace, so the names must be
    # imported from __main__ in the setup string.
    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        " next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
| 373
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class lowerCAmelCase_(PretrainedConfig):
    """Configuration for a Pegasus encoder-decoder model.

    Stores the architecture hyper-parameters (layer counts, hidden sizes,
    dropouts, special token ids) and maps the generic ``num_attention_heads``
    / ``hidden_size`` attributes onto the encoder heads and ``d_model``.
    """

    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
| 373
| 1
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    """Helper that holds DETR image-processor settings for the test class below
    and computes the heights/widths the processor is expected to produce.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor should output.

        Mirrors shortest-edge resizing: scale so the shorter side equals
        ``size["shortest_edge"]``.  For a batch, each image is computed
        individually and the per-axis maxima are returned (padding target).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class _lowercase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE: Optional[Any] = DetrImageProcessor if is_vision_available() else None
def _a ( self ):
lowerCAmelCase_: Optional[int] = DetrImageProcessingTester(self )
@property
def _a ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self ):
lowerCAmelCase_: str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase__ , "image_mean" ) )
self.assertTrue(hasattr(lowerCamelCase__ , "image_std" ) )
self.assertTrue(hasattr(lowerCamelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(lowerCamelCase__ , "do_rescale" ) )
self.assertTrue(hasattr(lowerCamelCase__ , "rescale_factor" ) )
self.assertTrue(hasattr(lowerCamelCase__ , "do_resize" ) )
self.assertTrue(hasattr(lowerCamelCase__ , "size" ) )
self.assertTrue(hasattr(lowerCamelCase__ , "do_pad" ) )
def _a ( self ):
lowerCAmelCase_: Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1_333} )
self.assertEqual(image_processor.do_pad , lowerCamelCase__ )
lowerCAmelCase_: str = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCamelCase__ )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , lowerCamelCase__ )
def _a ( self ):
pass
def _a ( self ):
# Initialize image_processing
lowerCAmelCase_: List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase_: str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , Image.Image )
# Test not batched input
lowerCAmelCase_: Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_: Union[str, Any] = self.image_processor_tester.get_expected_values(lowerCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase_ , lowerCAmelCase_: Union[str, Any] = self.image_processor_tester.get_expected_values(lowerCamelCase__ , batched=lowerCamelCase__ )
lowerCAmelCase_: List[str] = image_processing(lowerCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _a ( self ):
# Initialize image_processing
lowerCAmelCase_: List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase_: int = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , numpify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , np.ndarray )
# Test not batched input
lowerCAmelCase_: Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_: Union[str, Any] = self.image_processor_tester.get_expected_values(lowerCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase_: Optional[Any] = image_processing(lowerCamelCase__ , return_tensors="pt" ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_: str = self.image_processor_tester.get_expected_values(lowerCamelCase__ , batched=lowerCamelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _a ( self ):
# Initialize image_processing
lowerCAmelCase_: Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase_: Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , torchify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , torch.Tensor )
# Test not batched input
lowerCAmelCase_: Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_: str = self.image_processor_tester.get_expected_values(lowerCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase_: Union[str, Any] = image_processing(lowerCamelCase__ , return_tensors="pt" ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_: Optional[Any] = self.image_processor_tester.get_expected_values(lowerCamelCase__ , batched=lowerCamelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _a ( self ):
# prepare image and target
lowerCAmelCase_: int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
lowerCAmelCase_: Dict = json.loads(f.read() )
lowerCAmelCase_: int = {"image_id": 39_769, "annotations": target}
# encode them
lowerCAmelCase_: Dict = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50" )
lowerCAmelCase_: Tuple = image_processing(images=lowerCamelCase__ , annotations=lowerCamelCase__ , return_tensors="pt" )
# verify pixel values
lowerCAmelCase_: Dict = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase__ )
lowerCAmelCase_: Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase__ , atol=1E-4 ) )
# verify area
lowerCAmelCase_: str = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase__ ) )
# verify boxes
lowerCAmelCase_: Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase__ )
lowerCAmelCase_: List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase__ , atol=1E-3 ) )
# verify image_id
lowerCAmelCase_: Dict = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase__ ) )
# verify is_crowd
lowerCAmelCase_: List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase__ ) )
# verify class_labels
lowerCAmelCase_: str = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase__ ) )
# verify orig_size
lowerCAmelCase_: Dict = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase__ ) )
# verify size
lowerCAmelCase_: Union[str, Any] = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase__ ) )
@slow
def _a ( self ):
# prepare image, target and masks_path
lowerCAmelCase_: List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
lowerCAmelCase_: List[str] = json.loads(f.read() )
lowerCAmelCase_: Union[str, Any] = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}
lowerCAmelCase_: Tuple = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
lowerCAmelCase_: List[Any] = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic" )
lowerCAmelCase_: List[str] = image_processing(images=lowerCamelCase__ , annotations=lowerCamelCase__ , masks_path=lowerCamelCase__ , return_tensors="pt" )
# verify pixel values
lowerCAmelCase_: Dict = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase__ )
lowerCAmelCase_: Any = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase__ , atol=1E-4 ) )
# verify area
lowerCAmelCase_: Any = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase__ ) )
# verify boxes
lowerCAmelCase_: List[Any] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase__ )
lowerCAmelCase_: List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase__ , atol=1E-3 ) )
# verify image_id
lowerCAmelCase_: int = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase__ ) )
# verify is_crowd
lowerCAmelCase_: Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase__ ) )
# verify class_labels
lowerCAmelCase_: str = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase__ ) )
# verify masks
lowerCAmelCase_: Union[str, Any] = 822_873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCamelCase__ )
# verify orig_size
lowerCAmelCase_: Dict = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase__ ) )
# verify size
lowerCAmelCase_: List[Any] = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase__ ) )
| 613
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

# Base import structure; optional-backend modules are appended below.
# Fix: the dict must actually be named ``_import_structure`` (it is the name
# passed to _LazyModule) and the optional entries must be added as keys, not
# rebound to bare lists.
_import_structure = {
    "configuration_bridgetower": [
        "BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BridgeTowerConfig",
        "BridgeTowerTextConfig",
        "BridgeTowerVisionConfig",
    ],
    "processing_bridgetower": ["BridgeTowerProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bridgetower"] = [
        "BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BridgeTowerForContrastiveLearning",
        "BridgeTowerForImageAndTextRetrieval",
        "BridgeTowerForMaskedLM",
        "BridgeTowerModel",
        "BridgeTowerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bridgetower import (
        BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BridgeTowerConfig,
        BridgeTowerTextConfig,
        BridgeTowerVisionConfig,
    )
    from .processing_bridgetower import BridgeTowerProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_bridgetower import BridgeTowerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bridgetower import (
            BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
            BridgeTowerForContrastiveLearning,
            BridgeTowerForImageAndTextRetrieval,
            BridgeTowerForMaskedLM,
            BridgeTowerModel,
            BridgeTowerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 613
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__a = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
__a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 310
|
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class lowerCamelCase:
    """Synthetic linear-regression dataset: y = a*x + b + N(0, 0.1) noise.

    Fixes the corrupted signature (all parameters were named ``snake_case``,
    a SyntaxError) and the nonexistent ``np.floataa`` dtype; the body already
    used the canonical names ``a``, ``b``, ``length`` and ``seed``.
    """

    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)  # seeded for reproducible samples
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        # Returns one (feature, target) pair as a dict, torch-DataLoader style.
        return {"x": self.x[i], "y": self.y[i]}
class lowerCamelCase(torch.nn.Module):
    """Tiny regression module with fixed parameter tensors [2, 3].

    Fixes the corrupted signature (duplicate ``snake_case`` parameters) and
    renames the forward-style method to ``forward`` so calling the module
    works (its body already used ``x``, which was otherwise undefined).
    """

    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        # Parameters are intentionally fixed; ``a``/``b`` args are unused here.
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            # Log dtypes once to help debug mixed-precision issues.
            print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""")
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class lowerCamelCase(torch.nn.Module):
    """Scalar regression module: forward(x) = x * a + b.

    Fixes the corrupted signature (duplicate ``snake_case`` parameters, a
    SyntaxError) so ``a`` and ``b`` actually initialize the parameters, and
    renames the forward-style method to ``forward`` (its body used ``x``,
    which was otherwise undefined).
    """

    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            # Log dtypes once to help debug mixed-precision issues.
            print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""")
            self.first_batch = False
        return x * self.a + self.b
def A_(accelerator, batch_size=16):
    """Build train/eval DataLoaders over the MRPC CSV fixtures, tokenized with bert-base-cased.

    Fixes the corrupted signature (both parameters were named ``_lowercase``,
    a SyntaxError) and restores the local names the body already referenced
    (``accelerator``, ``tokenizer``, ``label_to_id``, ``outputs``).

    NOTE(review): batch sizes are hard-coded to 2 (train) / 1 (eval);
    ``batch_size`` is kept only for signature compatibility.
    """
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""")
    data_files = {"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""}
    datasets = load_dataset("""csv""", data_files=data_files)
    label_list = datasets["""train"""].unique("""label""")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["""sentence1"""], examples["""sentence2"""], truncation=True, max_length=None, padding="""max_length""")
        if "label" in examples:
            outputs["""labels"""] = [label_to_id[l] for l in examples["""label"""]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["""sentence1""", """sentence2""", """label"""], )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="""max_length""", max_length=128, return_tensors="""pt""")
        return tokenizer.pad(examples, padding="""longest""", return_tensors="""pt""")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["""train"""], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["""validation"""], shuffle=False, collate_fn=collate_fn, batch_size=1)
    return train_dataloader, eval_dataloader
| 310
| 1
|
'''simple docstring'''
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
lowerCAmelCase_ = '''2.13.1'''
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
lowerCAmelCase_ = concatenate_datasets
lowerCAmelCase_ = DownloadConfig
lowerCAmelCase_ = DownloadManager
lowerCAmelCase_ = DownloadMode
lowerCAmelCase_ = DownloadConfig
lowerCAmelCase_ = DownloadMode
lowerCAmelCase_ = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 531
|
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    """Builds a small BlipText config and random inputs for the tests below.

    Fixes the corrupted original: duplicate ``a`` parameters (a SyntaxError),
    locals discarded into ``A__`` while the canonical names were read, and
    all methods sharing one name while call sites used the canonical names
    (``prepare_config_and_inputs``, ``create_and_check_model``, ...). The
    class is renamed so ``BlipTextModelTester(self)`` in the test class
    resolves.
    """

    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=5_12,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, attention_mask) with a random causal-ish mask."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            # Force each row to be a contiguous prefix of 1s followed by 0s.
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    """Common-suite tests for TFBlipTextModel.

    Fixes the corrupted original: the base class ``UpperCAmelCase`` was
    undefined (the mixin ``TFModelTesterMixin`` is what is imported above),
    the class attributes all shared one name, ``ConfigTester`` received an
    undefined ``a`` instead of ``BlipTextConfig``, and every method shared a
    single name so unittest could discover none of them.
    """

    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason='Blip does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING')
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING')
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 531
| 1
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
# Module-level logger used by the conversion helpers below; fix: it was bound
# to ``__magic_name__`` while the functions reference ``logger``.
logger = logging.get_logger(__name__)

# Maps fairseq parameter-name fragments to HF Hubert parameter paths
# ("*" is replaced with the encoder layer index). Fix: the dict was bound to
# ``__magic_name__`` while the loaders reference ``MAPPING``.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy *value* into the sub-attribute of *hf_pointer* addressed by dotted *key*.

    Fixes the corrupted original: all five parameters shared one name (a
    SyntaxError) and each branch discarded the tensor into a local instead of
    writing ``hf_pointer.<attr>.data``, so no weight was ever loaded. The
    function is renamed to match its call site in ``recursively_load_weights``.
    """
    # Walk the dotted path down to the target submodule/parameter.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Copy every tensor from the fairseq state dict into the HF Hubert model.

    Fixes the corrupted original: all three parameters shared one name (a
    SyntaxError) and intermediate results were discarded into one local. The
    function is renamed to match its call site in ``convert_hubert_checkpoint``.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    # Fine-tuned (CTC) models nest the backbone under ``hubert.``.
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or (key.split("w2v_model." )[-1] == name.split("." )[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        # Recover the encoder layer index from the fairseq name.
                        layer_index = name.split(key)[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one fairseq ``conv_layers.*`` tensor into the HF feature extractor.

    Fixes the corrupted original: all five parameters shared one name (a
    SyntaxError) and the tensors were discarded into a local instead of being
    written into ``feature_extractor.conv_layers[...]``. The function is
    renamed to match its call site in ``recursively_load_weights``.
    """
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        # Convolution weights/biases.
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        # Layer-norm weights/biases (only layer 0 has one under group norm).
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Convert a fairseq Hubert checkpoint to a HF PyTorch model and save it.

    Fixes the corrupted original: all parameters shared one name (a
    SyntaxError) and every intermediate object (config, tokenizer, processor,
    model) was discarded into a single local. The function is renamed so the
    ``__main__`` block's call resolves.
    """
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/" )[:-1])})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
__magic_name__ = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 717
|
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product="laptop"):
    """Scrape the first Amazon.in results page for *product* into a DataFrame.

    Fixes the corrupted original: locals were discarded into one name, the
    scraped row list was never appended to the DataFrame, ``item.ha`` should
    be ``item.h2``, and the function is renamed so the ``__main__`` call
    resolves.

    NOTE(review): the top-level import reads ``from bsa import BeautifulSoup``
    — presumably a typo for ``bs4``; confirm and fix the import line.
    """
    url = f"""https://www.amazon.in/laptop/s?k={product}"""
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ] )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div", attrs={"class": "s-result-item", "data-component-type": "s-search-result"}, ), soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}), ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}).text.split("₹" )[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹" ).replace("," , "" ))
                            - float(product_price.strip("₹" ).replace("," , "" ))
                        )
                        / float(product_mrp.strip("₹" ).replace("," , "" ))
                    )
                    * 100 )
            except ValueError:
                discount = float("nan" )
        except AttributeError:
            pass
        # Append the scraped row; previously the list was built and discarded.
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
    # Blank out rows where price/MRP could not be scraped.
    data_frame.loc[data_frame["Current Price of the product"] == "", "Current Price of the product"] = " "
    data_frame.loc[data_frame["MRP of the product"] == "", "MRP of the product"] = " "
    data_frame.index += 1
    return data_frame
if __name__ == "__main__":
__magic_name__ = '''headphones'''
get_amazon_product_data(product).to_csv(F'''Amazon Product Data for {product}.csv''')
| 530
| 0
|
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    """Dummy config for a model type unknown to the auto classes.

    Renamed from the obfuscated ``__UpperCAmelCase`` (base ``__A`` was
    undefined; ``BertConfig`` is imported above) so the reference to
    ``NewModelConfig`` in the TF dummy model below resolves.
    """

    model_type = "new-model"
if is_tf_available():

    class TFNewModel(TFBertModel):
        """Dummy TF model registered for the "new-model" config type.

        Renamed from the obfuscated ``__UpperCAmelCase`` (base ``__A`` was
        undefined; ``TFBertModel`` is imported in the TF-available branch
        above) and the attribute restored to ``config_class``, which is what
        the auto-class machinery reads.
        """

        config_class = NewModelConfig
@require_tf
class __UpperCAmelCase (unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
A__ : int = """bert-base-cased"""
A__ : int = AutoConfig.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
A__ : Union[str, Any] = TFAutoModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
A__ : List[str] = """bert-base-cased"""
A__ : Union[str, Any] = AutoConfig.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
A__ : List[Any] = TFAutoModelForPreTraining.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Optional[Any] = AutoConfig.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
A__ : Tuple = TFAutoModelForCausalLM.from_pretrained(snake_case_ )
A__ , A__ : int = TFAutoModelForCausalLM.from_pretrained(snake_case_ , output_loading_info=snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Tuple = AutoConfig.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
A__ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Union[str, Any] = AutoConfig.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
A__ : str = TFAutoModelForMaskedLM.from_pretrained(snake_case_ )
A__ , A__ : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(snake_case_ , output_loading_info=snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
@slow
def lowerCamelCase ( self ):
    """Smoke-test TFAutoModelForSeqaSeqLM on the first T5 checkpoint,
    including the `output_loading_info=True` variant."""
    # NOTE(review): `snake_case_` is a mangling placeholder for the original
    # locals — restore before running.
    for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        A__ : Tuple = AutoConfig.from_pretrained(snake_case_ )
        self.assertIsNotNone(snake_case_ )
        self.assertIsInstance(snake_case_ , snake_case_ )
        A__ : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(snake_case_ )
        A__ , A__ : Any = TFAutoModelForSeqaSeqLM.from_pretrained(snake_case_ , output_loading_info=snake_case_ )
        self.assertIsNotNone(snake_case_ )
        self.assertIsInstance(snake_case_ , snake_case_ )
@slow
def lowerCamelCase ( self ):
    """Smoke-test TFAutoModelForSequenceClassification on bert-base-uncased."""
    # NOTE(review): `snake_case_` is a mangling placeholder for the original
    # locals — restore before running.
    for model_name in ["bert-base-uncased"]:
        A__ : List[Any] = AutoConfig.from_pretrained(snake_case_ )
        self.assertIsNotNone(snake_case_ )
        self.assertIsInstance(snake_case_ , snake_case_ )
        A__ : Union[str, Any] = TFAutoModelForSequenceClassification.from_pretrained(snake_case_ )
        self.assertIsNotNone(snake_case_ )
        self.assertIsInstance(snake_case_ , snake_case_ )
@slow
def lowerCamelCase ( self ):
    """Smoke-test TFAutoModelForQuestionAnswering on bert-base-uncased."""
    # NOTE(review): `snake_case_` is a mangling placeholder for the original
    # locals — restore before running.
    for model_name in ["bert-base-uncased"]:
        A__ : Tuple = AutoConfig.from_pretrained(snake_case_ )
        self.assertIsNotNone(snake_case_ )
        self.assertIsInstance(snake_case_ , snake_case_ )
        A__ : Any = TFAutoModelForQuestionAnswering.from_pretrained(snake_case_ )
        self.assertIsNotNone(snake_case_ )
        self.assertIsInstance(snake_case_ , snake_case_ )
@slow
@require_tensorflow_probability
def lowerCamelCase ( self ):
    """Smoke-test TFAutoModelForTableQuestionAnswering on a TAPAS checkpoint;
    requires tensorflow_probability."""
    # NOTE(review): `snake_case_` is a mangling placeholder for the original
    # locals — restore before running.
    for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
        A__ : Dict = AutoConfig.from_pretrained(snake_case_ )
        self.assertIsNotNone(snake_case_ )
        self.assertIsInstance(snake_case_ , snake_case_ )
        A__ : List[Any] = TFAutoModelForTableQuestionAnswering.from_pretrained(snake_case_ )
        A__ , A__ : Optional[Any] = TFAutoModelForTableQuestionAnswering.from_pretrained(
            snake_case_ , output_loading_info=snake_case_ )
        self.assertIsNotNone(snake_case_ )
        self.assertIsInstance(snake_case_ , snake_case_ )
def lowerCamelCase ( self ):
    """Check the parameter count (14,410 params, all trainable) of a tiny
    checkpoint loaded through TFAutoModelWithLMHead."""
    # NOTE(review): `snake_case_` is a mangling placeholder — the checkpoint
    # name and expected model class were lost; restore before running.
    A__ : Optional[int] = TFAutoModelWithLMHead.from_pretrained(snake_case_ )
    self.assertIsInstance(snake_case_ , snake_case_ )
    self.assertEqual(model.num_parameters() , 14_410 )
    self.assertEqual(model.num_parameters(only_trainable=snake_case_ ) , 14_410 )
def lowerCamelCase ( self ):
    """Same parameter-count check as above, for a second tiny checkpoint."""
    # NOTE(review): `snake_case_` is a mangling placeholder — the checkpoint
    # name and expected model class were lost; restore before running.
    A__ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(snake_case_ )
    self.assertIsInstance(snake_case_ , snake_case_ )
    self.assertEqual(model.num_parameters() , 14_410 )
    self.assertEqual(model.num_parameters(only_trainable=snake_case_ ) , 14_410 )
def lowerCamelCase ( self ):
    """Load a tiny Funnel model, re-create it from a modified config via
    `from_config`, and round-trip it through save_pretrained/from_pretrained."""
    # NOTE(review): `snake_case_` is a mangling placeholder for the original
    # locals (config / tmp_dir / expected class) — restore before running.
    A__ : Optional[Any] = TFAutoModel.from_pretrained("""sgugger/funnel-random-tiny""" )
    self.assertIsInstance(snake_case_ , snake_case_ )
    A__ : Any = copy.deepcopy(model.config )
    # Force the base-model architecture before rebuilding from config.
    A__ : Dict = ["""FunnelBaseModel"""]
    A__ : Optional[int] = TFAutoModel.from_config(snake_case_ )
    self.assertIsInstance(snake_case_ , snake_case_ )
    with tempfile.TemporaryDirectory() as tmp_dir:
        model.save_pretrained(snake_case_ )
        A__ : Any = TFAutoModel.from_pretrained(snake_case_ )
        self.assertIsInstance(snake_case_ , snake_case_ )
def lowerCamelCase ( self ):
    """Register a custom config/model pair with every TF auto class, verify
    mis-registrations raise, and round-trip a registered model through
    save_pretrained/from_pretrained. Cleans the registries in `finally`."""
    # NOTE(review): `snake_case_` placeholders replaced real identifiers
    # (NewModelConfig / NewModel / model / tmp_dir) during source mangling;
    # restore before running.
    try:
        AutoConfig.register("""new-model""" , snake_case_ )
        A__ : List[Any] = [
            TFAutoModel,
            TFAutoModelForCausalLM,
            TFAutoModelForMaskedLM,
            TFAutoModelForPreTraining,
            TFAutoModelForQuestionAnswering,
            TFAutoModelForSequenceClassification,
            TFAutoModelForTokenClassification,
        ]
        for auto_class in auto_classes:
            with self.subTest(auto_class.__name__ ):
                # Wrong config class will raise an error
                with self.assertRaises(snake_case_ ):
                    auto_class.register(snake_case_ , snake_case_ )
                auto_class.register(snake_case_ , snake_case_ )
                # Trying to register something existing in the Transformers library will raise an error
                with self.assertRaises(snake_case_ ):
                    auto_class.register(snake_case_ , snake_case_ )
                # Now that the config is registered, it can be used as any other config with the auto-API
                A__ : str = BertModelTester(self ).get_config()
                A__ : Union[str, Any] = NewModelConfig(**tiny_config.to_dict() )
                A__ : str = auto_class.from_config(snake_case_ )
                self.assertIsInstance(snake_case_ , snake_case_ )
                with tempfile.TemporaryDirectory() as tmp_dir:
                    model.save_pretrained(snake_case_ )
                    A__ : Any = auto_class.from_pretrained(snake_case_ )
                    self.assertIsInstance(snake_case_ , snake_case_ )
    finally:
        # Undo the registrations so other tests see pristine registries.
        if "new-model" in CONFIG_MAPPING._extra_content:
            del CONFIG_MAPPING._extra_content["new-model"]
        for mapping in (
            TF_MODEL_MAPPING,
            TF_MODEL_FOR_PRETRAINING_MAPPING,
            TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
            TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
            TF_MODEL_FOR_CAUSAL_LM_MAPPING,
            TF_MODEL_FOR_MASKED_LM_MAPPING,
        ):
            if NewModelConfig in mapping._extra_content:
                del mapping._extra_content[NewModelConfig]
def lowerCamelCase ( self ):
    """A non-existent repo id must raise with a helpful message."""
    # NOTE(review): `snake_case_` replaced the expected exception type
    # (presumably EnvironmentError/OSError — confirm) during mangling.
    with self.assertRaisesRegex(
        snake_case_ , """bert-base is not a local folder and is not a valid model identifier""" ):
        A__ : Optional[int] = TFAutoModel.from_pretrained("""bert-base""" )
def lowerCamelCase ( self ):
    """An invalid git revision must raise with a helpful message."""
    # NOTE(review): `snake_case_` replaced the expected exception type and the
    # model id during mangling — restore before running.
    with self.assertRaisesRegex(
        snake_case_ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
        A__ : str = TFAutoModel.from_pretrained(snake_case_ , revision="""aaaaaa""" )
def lowerCamelCase ( self ):
    """A repo with a config but no model weights must raise."""
    # NOTE(review): `snake_case_` replaced the expected exception type during
    # mangling — restore before running.
    with self.assertRaisesRegex(
        snake_case_ , """hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin""" , ):
        A__ : Any = TFAutoModel.from_pretrained("""hf-internal-testing/config-no-model""" )
def lowerCamelCase ( self ):
    """Loading a PyTorch-only checkpoint without from_pt=True must raise."""
    # NOTE(review): `snake_case_` replaced the expected exception type during
    # mangling — restore before running.
    with self.assertRaisesRegex(snake_case_ , """Use `from_pt=True` to load this model""" ):
        A__ : Optional[Any] = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-bert-pt-only""" )
def lowerCamelCase ( self ):
    """A second from_pretrained on a cached checkpoint must only issue a single
    HEAD request (no GET), for both plain and sharded checkpoints."""
    A__ : Optional[Any] = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
    with RequestCounter() as counter:
        A__ : Optional[int] = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
        self.assertEqual(counter.get_request_count , 0 )
        self.assertEqual(counter.head_request_count , 1 )
        self.assertEqual(counter.other_request_count , 0 )
    # With a sharded checkpoint
    A__ : Dict = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
    with RequestCounter() as counter:
        A__ : List[str] = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
        self.assertEqual(counter.get_request_count , 0 )
        self.assertEqual(counter.head_request_count , 1 )
        self.assertEqual(counter.other_request_count , 0 )
| 363
|
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class __UpperCAmelCase (unittest.TestCase , __A ):
    """Tests for the `text-classification` tool, local and remote variants.

    NOTE(review): `snake_case_` placeholders replaced real values/locals
    (presumably `remote=True` and the classification result) during source
    mangling — restore before running.
    """

    def lowerCamelCase ( self ):
        # setUp: instantiate the local tool and its remote counterpart.
        A__ : List[Any] = load_tool("""text-classification""" )
        self.tool.setup()
        A__ : Any = load_tool("""text-classification""" , remote=snake_case_ )

    def lowerCamelCase ( self ):
        # Positional-argument call on the local tool.
        A__ : int = self.tool("""That's quite cool""" , ["""positive""", """negative"""] )
        self.assertEqual(snake_case_ , """positive""" )

    def lowerCamelCase ( self ):
        # Positional-argument call on the remote tool.
        A__ : Optional[Any] = self.remote_tool("""That's quite cool""" , ["""positive""", """negative"""] )
        self.assertEqual(snake_case_ , """positive""" )

    def lowerCamelCase ( self ):
        # Keyword-argument call on the local tool.
        A__ : str = self.tool(text="""That's quite cool""" , labels=["""positive""", """negative"""] )
        self.assertEqual(snake_case_ , """positive""" )

    def lowerCamelCase ( self ):
        # Keyword-argument call on the remote tool.
        A__ : int = self.remote_tool(text="""That's quite cool""" , labels=["""positive""", """negative"""] )
        self.assertEqual(snake_case_ , """positive""" )
| 363
| 1
|
import heapq
def __magic_name__ ( lowercase ) -> set[int]:
"""simple docstring"""
lowercase_ : list[list] = []
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(lowercase , [-1 * len(lowercase ), (key, value)] )
# chosen_vertices = set of chosen vertices
lowercase_ : Tuple = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
lowercase_ : Union[str, Any] = heapq.heappop(lowercase )[1][0]
chosen_vertices.add(lowercase )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
lowercase_ : int = elem[1][1].index(lowercase )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(lowercase )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
| 436
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""microsoft/unispeech-large-1500h-cv""": (
"""https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"""
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UpperCamelCase__ ( lowerCamelCase__ ):
    """Configuration for a UniSpeech-style speech model.

    NOTE(review): the original `__init__` had every parameter mangled to the
    same name `snake_case__` (a SyntaxError); the parameter names below are
    restored from the attribute assignments in the body and the upstream
    UniSpeech configuration defaults — confirm against upstream.
    """

    # Model-type identifier used by the auto classes (originally `model_type`).
    __a : List[str] = """unispeech"""

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        # Transformer encoder hyper-parameters.
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        # Convolutional feature-encoder layout; stored as lists so they are
        # JSON-serializable.
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        # The three conv layouts must describe the same number of layers.
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
                """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
                f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
                f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def snake_case__ ( self ) -> Dict:
        """Overall downsampling factor of the conv feature encoder (the
        product of all conv strides)."""
        return functools.reduce(operator.mul, self.conv_stride, 1 )
| 436
| 1
|
'''simple docstring'''
def lowerCamelCase ( a: int , b: int ) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string.

    The result is prefixed with "0b" and zero-padded to the width of the wider
    operand.

    :raises ValueError: if either input is negative.
    """
    if a < 0 or b < 0:
        raise ValueError("""the value of both inputs must be positive""" )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    # AND the two strings column by column after left-padding with zeros.
    return "0b" + "".join(
        str(int(char_a == """1""" and char_b == """1""" ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 314
|
import sys
# Project Euler problem 8: the 1000-digit number, stored as one concatenated
# string of digits (adjacent literals are joined by the parser).
SCREAMING_SNAKE_CASE : List[Any] = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)
def lowerCamelCase_ ( __UpperCamelCase = None ):
    """Return the largest product of 13 adjacent digits in a digit string.

    :param __UpperCamelCase: digit string to scan; defaults to the module's
        1000-digit Project Euler constant when omitted.
    """
    # Late-bind the default (the original `= N` referenced an undefined name);
    # the module constant is only looked up when no argument is supplied.
    n = SCREAMING_SNAKE_CASE if __UpperCamelCase is None else __UpperCamelCase
    largest_product = -sys.maxsize - 1
    # Slide a 13-digit window across the string.
    for i in range(len(n ) - 12 ):
        product = 1
        for j in range(13 ):
            product *= int(n[i + j] )
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(f'''{solution() = }''')
| 141
| 0
|
"""simple docstring"""
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def lowerCamelCase_ ( _lowerCamelCase ):
    """Decorator for `forward`-style methods: runs the accelerate
    ``_hf_hook.pre_forward`` hook (CPU-offload support) before the wrapped
    method when accelerate >= 0.17.0 is installed.

    Returns the method unchanged when accelerate is missing or too old.
    (The original body returned an undefined name ``method`` and parsed the
    method object instead of the version string — both restored here.)
    """
    import functools

    if not is_accelerate_available():
        return _lowerCamelCase
    # Compare against the base version so dev/rc suffixes parse cleanly.
    accelerate_version = version.parse(accelerate.__version__ ).base_version
    if version.parse(accelerate_version ) < version.parse('0.17.0' ):
        return _lowerCamelCase

    @functools.wraps(_lowerCamelCase )
    def wrapper(self , *args , **kwargs ):
        # Run the offload hook (if installed on this module) before forwarding.
        if hasattr(self , '_hf_hook' ) and hasattr(self._hf_hook , 'pre_forward' ):
            self._hf_hook.pre_forward(self )
        return _lowerCamelCase(self , *args , **kwargs )

    return wrapper
| 707
|
"""simple docstring"""
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
A_ : Optional[int] = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
A_ : List[str] = requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(1_00_00):
out_file.write(data)
A_ : Tuple = BeautifulSoup(res.text, "html.parser")
A_ : Dict = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(f"https://google.com{link.get('href')}")
| 696
| 0
|
def __lowerCamelCase ( lowerCamelCase__ : str ):
'''simple docstring'''
if n_term == "":
return []
lowerCamelCase = []
for temp in range(int(lowerCamelCase__ ) ):
series.append(f'1/{temp + 1}' if series else """1""" )
return series
if __name__ == "__main__":
UpperCAmelCase : str = input("Enter the last number (nth term) of the Harmonic Series")
print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
print(harmonic_series(nth_term))
| 457
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
UpperCAmelCase : Any = logging.get_logger(__name__)
class __lowercase ( a_ ):
    """Feature extractor turning raw mono audio into log-mel spectrogram
    patches plus an attention mask over the patched time axis.

    NOTE(review): the original signatures had every parameter mangled to the
    same name ``A`` (a SyntaxError); names below are restored from the
    attribute assignments and call sites in the body — confirm upstream.
    """

    UpperCamelCase : List[Any] = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ) -> Dict:
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs , )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        # Number of patches along the frequency axis.
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        # Slaney-style mel filter bank, transposed to (n_freq_bins, n_mels).
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=22050.0 , sampling_rate=sampling_rate , norm="""slaney""" , mel_scale="""slaney""" , ).T

    def _np_extract_fbank_features( self , waveform ) -> np.ndarray:
        """dB-scaled log-mel spectrogram, shifted/scaled into [-1, 1]."""
        log_spec = spectrogram(
            waveform , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,
        mask_audio=False,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one waveform or a batch of waveforms into padded log-mel
        patches (``audio_values``) and an optional ``audio_mask``."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    """This feature extractor is set to support sampling rate"""
                    f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
                    f' with {self.sampling_rate} and not {sampling_rate}.' )
        else:
            logger.warning(
                """It is strongly recommended to pass the `sampling_rate` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )

        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        # Normalize to a list of float32 column vectors.
        if is_batched:
            raw_speech = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech] ).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , list ):
            audio_features = [np.asarray(feature , dtype=np.float32 ) for feature in audio_features]

        # The maximum number of audio patches in a batch.
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] )
        # Create audio attention mask: 1 for real patches, 0 for padding.
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask ).astype(np.float32 )

        # Pad every spectrogram to the same (time, freq) shape.
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # maximum audio size in the batch
        padded_audio_features = np.ones([len(audio_features ), 1, max_time_len, self.feature_size] ).astype(np.float32 )
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features ) ):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
        else:
            data = {"""audio_values""": padded_audio_features}
        encoded_inputs = BatchFeature(data=data , tensor_type=return_tensors )
        return encoded_inputs
| 457
| 1
|
"""simple docstring"""
def lowerCamelCase_ ( a , b ):
    """Return True if string ``a`` can be abbreviated to string ``b``.

    A lowercase letter in ``a`` may either be deleted or capitalised to match
    the next character of ``b``; uppercase letters in ``a`` must match ``b``
    exactly. Classic DP over (prefix of a, prefix of b).
    (The original signature had both parameters mangled to the same name and
    the DP table references destroyed; restored here.)
    """
    n = len(a )
    m = len(b )
    # dp[i][j]: can a[:i] be abbreviated to b[:j]?
    dp = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
    dp[0][0] = True
    for i in range(n ):
        for j in range(m + 1 ):
            if dp[i][j]:
                # Capitalise a[i] to match b[j].
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # Delete a[i] (only lowercase letters may be dropped).
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696
|
"""simple docstring"""
import os
def lowerCamelCase_ ( ):
    """Project Euler 22: sum of name scores from p022_names.txt.

    Each name's score is its alphabetical value (A=1 .. Z=26, summed over its
    letters) multiplied by its 1-based position in the sorted list.
    """
    # The data file lives next to this module (the original passed an
    # undefined name to os.path.dirname; __file__ restores the intent).
    with open(os.path.dirname(__file__ ) + '/p022_names.txt' ) as file:
        names = str(file.readlines()[0] )
    names = names.replace('"' , '' ).split(',' )
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names ):
        for letter in name:
            # ord('A') is 65, so ord(letter) - 64 maps A->1 ... Z->26.
            name_score += ord(letter ) - 64
        total_score += (i + 1) * name_score
        name_score = 0  # reset for the next name
    return total_score
if __name__ == "__main__":
print(solution())
| 696
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase(
    PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase
):
    """Fast (CPU, tiny-model) tests for StableDiffusionInstructPixaPixPipeline.

    NOTE(review): source mangling collapsed every method name to ``_a`` (the
    bodies still call ``get_dummy_components``/``get_dummy_inputs`` by their
    real names), duplicated parameter names (a SyntaxError), and replaced the
    mixin base classes and several literals with placeholders. Names and
    literals below are restored from the body evidence and the upstream
    diffusers test conventions — confirm against upstream.
    """

    pipeline_class = StableDiffusionInstructPixaPixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Tiny UNet/VAE/CLIP components for a fast pipeline instantiation."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=8,
            out_channels=4,
            down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),
            up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'),
            cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic prompt/image/generator inputs for the fast tests."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert('RGB')
        # MPS does not support device-bound generators.
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'image_guidance_scale': 1,
            'output_type': 'numpy',
        }
        return inputs

    def test_stable_diffusion_pix2pix_default_case(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = 'french fries'
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs['prompt'] = [inputs['prompt']] * 2
        # Build a 2-image batch from the single dummy image.
        image = np.array(inputs['image']).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs['image'] = image.repeat(2, 1, 1, 1)
        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]
        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_euler(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['scheduler'] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear'
        )
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        rounded_slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(','.join([str(x) for x in rounded_slice]))
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_latents_input(self):
        """Passing pre-encoded latents must match passing the raw image."""
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPixaPixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type='pt'))[0]
        vae = components['vae']
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type='pt')
        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()
        out_latents_inputs = pipe(**inputs)[0]
        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, 'passing latents as image input generate different result from passing image')
@slow
@require_torch_gpu
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
def tearDown(self):
    """Release Python and CUDA memory after each slow test.

    NOTE(review): the mangled name was ``_a``; the ``super().tearDown()`` call
    identifies this as the unittest tearDown override — confirm upstream.
    """
    super().tearDown()
    gc.collect()
    torch.cuda.empty_cache()
def _a ( self , _lowerCamelCase=0 ):
UpperCamelCase_: int = torch.manual_seed(_lowerCamelCase )
UpperCamelCase_: int = load_image(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' )
UpperCamelCase_: Union[str, Any] = {
'prompt': 'turn him into a cyborg',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'image_guidance_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def _a ( self ):
UpperCamelCase_: Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=_lowerCamelCase )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
pipe.enable_attention_slicing()
UpperCamelCase_: List[str] = self.get_inputs()
UpperCamelCase_: Optional[int] = pipe(**_lowerCamelCase ).images
UpperCamelCase_: Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
UpperCamelCase_: Optional[Any] = np.array([0.5_9_0_2, 0.6_0_1_5, 0.6_0_2_7, 0.5_9_8_3, 0.6_0_9_2, 0.6_0_6_1, 0.5_7_6_5, 0.5_7_8_5, 0.5_5_5_5] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _a ( self ):
UpperCamelCase_: List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=_lowerCamelCase )
UpperCamelCase_: List[str] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
pipe.enable_attention_slicing()
UpperCamelCase_: Union[str, Any] = self.get_inputs()
UpperCamelCase_: Optional[Any] = pipe(**_lowerCamelCase ).images
UpperCamelCase_: Optional[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
UpperCamelCase_: Any = np.array([0.6_5_7_8, 0.6_8_1_7, 0.6_9_7_2, 0.6_7_6_1, 0.6_8_5_6, 0.6_9_1_6, 0.6_4_2_8, 0.6_5_1_6, 0.6_3_0_1] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _a ( self ):
UpperCamelCase_: Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=_lowerCamelCase )
UpperCamelCase_: Any = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
pipe.enable_attention_slicing()
UpperCamelCase_: int = self.get_inputs()
UpperCamelCase_: Any = pipe(**_lowerCamelCase ).images
UpperCamelCase_: str = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
UpperCamelCase_: str = np.array([0.3_8_2_8, 0.3_8_3_4, 0.3_8_1_8, 0.3_7_9_2, 0.3_8_6_5, 0.3_7_5_2, 0.3_7_9_2, 0.3_8_4_7, 0.3_7_5_3] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _a ( self ):
UpperCamelCase_: Optional[Any] = 0
def callback_fn(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> None:
UpperCamelCase_: List[Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
UpperCamelCase_: List[str] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
UpperCamelCase_: Optional[Any] = latents[0, -3:, -3:, -1]
UpperCamelCase_: Tuple = np.array([-0.2_4_6_3, -0.4_6_4_4, -0.9_7_5_6, 1.5_1_7_6, 1.4_4_1_4, 0.7_8_6_6, 0.9_8_9_7, 0.8_5_2_1, 0.7_9_8_3] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
UpperCamelCase_: str = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
UpperCamelCase_: Optional[int] = latents[0, -3:, -3:, -1]
UpperCamelCase_: Any = np.array([-0.2_6_4_4, -0.4_6_2_6, -0.9_6_5_3, 1.5_1_7_6, 1.4_5_5_1, 0.7_6_8_6, 0.9_8_0_5, 0.8_4_5_2, 0.8_1_1_5] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
UpperCamelCase_: Tuple = False
UpperCamelCase_: Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=_lowerCamelCase , torch_dtype=torch.floataa )
UpperCamelCase_: Tuple = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
pipe.enable_attention_slicing()
UpperCamelCase_: Optional[Any] = self.get_inputs()
pipe(**_lowerCamelCase , callback=_lowerCamelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def _a ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCamelCase_: List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=_lowerCamelCase , torch_dtype=torch.floataa )
UpperCamelCase_: Optional[int] = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCamelCase_: Union[str, Any] = self.get_inputs()
UpperCamelCase_: Optional[Any] = pipe(**_lowerCamelCase )
UpperCamelCase_: str = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 1_0**9
def _a ( self ):
UpperCamelCase_: Tuple = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
UpperCamelCase_: Dict = inputs['image'].resize((5_0_4, 5_0_4) )
UpperCamelCase_: int = 'timbrooks/instruct-pix2pix'
UpperCamelCase_: Optional[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
_lowerCamelCase , safety_checker=_lowerCamelCase , )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
pipe.enable_attention_slicing()
UpperCamelCase_: Tuple = pipe(**_lowerCamelCase )
UpperCamelCase_: Optional[int] = output.images[0]
UpperCamelCase_: List[Any] = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 5_0_4, 3)
UpperCamelCase_: Union[str, Any] = np.array([0.2_7_2_6, 0.2_5_2_9, 0.2_6_6_4, 0.2_6_5_5, 0.2_6_4_1, 0.2_6_4_2, 0.2_5_9_1, 0.2_6_4_9, 0.2_5_9_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
| 57
|
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
# The converter below logs via the name `logger`; the obfuscated source bound the
# logger to `_UpperCamelCase` instead, which made every `logger.info(...)` a NameError.
logger = logging.get_logger(__name__)
_UpperCamelCase = logger  # keep the old binding for backward compatibility
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Copy the weights of an old-structure (XLM)ProphetNet checkpoint into the
    current model structure and save the converted model.

    Fixes from the obfuscated source: both parameters were named
    ``SCREAMING_SNAKE_CASE`` (a SyntaxError), the body referenced undefined names,
    the in_proj shape "checks" were bare no-op expressions missing ``assert``,
    and ``__main__`` called this function by a name that did not exist.

    Args:
        prophetnet_checkpoint_path: path of the old-structure checkpoint to load.
        pytorch_dump_folder_path: directory the converted model is saved to.

    Raises:
        ValueError: if a missing key cannot be mapped onto the old model.
        AssertionError: if corresponding tensors disagree in shape.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    # q/k/v projections are packed into a single in_proj tensor in the old model.
    special_keys = ["key_proj", "value_proj", "query_proj"]

    # new attribute name -> old attribute name ("" means: stay in the same container).
    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                # The old in_proj tensors stack q/k/v in thirds; slice out this one.
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                # These were bare comparison expressions (no-ops) before; they are checks.
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            # Descend one level on both sides for the next path component.
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)


# Backward-compatible alias for the previous (generated) name.
lowerCAmelCase_ = convert_prophetnet_checkpoint_to_pytorch
if __name__ == "__main__":
    # Fixes NameErrors from the obfuscated source: the parser and parsed args were
    # both bound to `_UpperCamelCase` while the code referenced `parser` and `args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    lowerCAmelCase_(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 179
| 0
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    """Builds tiny ConvBERT configs/inputs and checks each TF model head's output shapes.

    Fixes from the obfuscated source: the class was named ``a_`` while the test
    class below instantiates ``TFConvBertModelTester``; every method shared the
    name ``__lowerCAmelCase`` (only the last definition survived); and each
    ``create_and_check_*`` signature repeated the parameter name ``_lowerCamelCase``
    seven times, which is a SyntaxError.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # NOTE(review): the constructor deliberately ignores most keyword values and
        # pins the tester to fixed sizes (the original source hard-codes them too);
        # keep these in sync with the shape assertions below.
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        """Create random ids/masks/labels plus a matching small ConvBertConfig."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        # Duplicate each example once per choice along a new axis.
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + attention-output tests for the TF ConvBERT model family.

    Fixes from the obfuscated source: the base classes were the undefined name
    ``a__`` (should be the imported mixins); every test method was named
    ``__lowerCAmelCase`` so later defs shadowed earlier ones and unittest
    discovered none of them; and several ``_lowerCamelCase`` references
    (e.g. ``config_class=``) were undefined names.
    """

    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFConvBertModel,
            'fill-mask': TFConvBertForMaskedLM,
            'question-answering': TFConvBertForQuestionAnswering,
            'text-classification': TFConvBertForSequenceClassification,
            'token-classification': TFConvBertForTokenClassification,
            'zero-shot': TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_saved_model_creation_extended(self):
        """Round-trip each model through SavedModel and re-check hidden-state/attention shapes."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, '''use_cache'''.strip("'") if False else 'use_cache'):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, 'encoder_seq_length', self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, 'key_length', encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, 'saved_model', '1')
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs['encoder_hidden_states']
                    output_attentions = outputs['encoder_attentions']
                else:
                    output_hidden_states = outputs['hidden_states']
                    output_attentions = outputs['attentions']

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, 'expected_num_hidden_layers', self.model_tester.num_hidden_layers + 1)

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                # ConvBERT halves the effective attention heads via its head ratio.
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
        self.assertIsNotNone(model)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, 'decoder_seq_length', self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, 'encoder_seq_length', self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, 'key_length', decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, 'key_length', encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict['output_attentions'] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    """Pinned-output integration test against the released conv-bert-base weights.

    The method was previously named ``__lowerCAmelCase``, which unittest never
    discovers; renamed to a ``test_*`` method.
    """

    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 333
|
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    """Nearest-neighbour image scaler: each destination pixel copies its closest source pixel.

    Fixes from the obfuscated source: the class was named ``a_`` although
    ``__main__`` instantiates ``NearestNeighbour``; the ``self.`` prefixes were
    stripped from every attribute assignment; all three methods were named
    ``__lowerCAmelCase`` while ``process`` calls ``get_x``/``get_y``;
    ``np.uinta`` does not exist (should be ``np.uint8``); and a zero
    width/height slipped past validation only to crash with ZeroDivisionError.
    """

    def __init__(self, img, dst_width: int, dst_height: int):
        # Reject zero as well as negatives: the ratio computation below divides
        # by the destination size.
        if dst_width < 1 or dst_height < 1:
            raise ValueError('Destination width/height should be > 0')

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        # How many source pixels one destination pixel spans, per axis.
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        # Output buffer, pre-filled white (uint8 RGB).
        self.output = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        """Fill `self.output` by sampling the nearest source pixel for every destination pixel."""
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """Map a destination column index to its nearest source column."""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """Map a destination row index to its nearest source row."""
        return int(self.ratio_y * y)
if __name__ == "__main__":
    # Fixes NameErrors from the obfuscated source: every binding was named `a__`
    # while the code references `dst_w`, `dst_h`, `im`, and `n`.
    dst_w, dst_h = 800, 600
    # Flag 1 = load as a color (BGR) image.
    im = imread('image_data/lena.jpg', 1)

    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
| 333
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class snake_case(PipelineTool):
    """Tool that answers a natural-language question about an image (ViLT VQA).

    Fixes from the obfuscated source: the base class was the undefined name ``_a``
    (the file imports ``PipelineTool``); every class attribute was named
    ``_lowerCAmelCase`` so only the last one survived, while ``PipelineTool``
    reads ``default_checkpoint``/``description``/etc.; the three methods all
    shared the name ``lowercase__`` (the tool protocol is
    ``encode``/``forward``/``decode``); ``encode`` declared two parameters with
    the same name (a SyntaxError); ``A_`` was undefined; and
    ``config.idalabel`` should be ``config.id2label``.
    """

    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        # The tool needs PIL at runtime; fail early if the vision extras are missing.
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image.Image", question: str):
        """Pack the (image, question) pair into model-ready tensors."""
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        """Run the VQA model without tracking gradients; return raw logits."""
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        """Map the highest-scoring logit back to its answer label."""
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
| 261
|
"""simple docstring"""
def lowerCAmelCase_(number_of_steps: int) -> int:
    """Count the distinct ways to climb `number_of_steps` stairs taking 1 or 2
    steps at a time (LeetCode No. 70 — the Fibonacci recurrence).

    Fixes from the obfuscated source: the validity check was
    ``isinstance(x, x)`` (which raises TypeError instead of validating), the
    loop referenced undefined names ``current``/``previous``, and both unpack
    targets shared the name ``__lowercase``.

    Raises:
        AssertionError: if `number_of_steps` is not a positive integer.
            (NOTE: `assert` is stripped under ``python -O``; kept to preserve
            the exception type existing callers may rely on.)
    """
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
    import doctest

    # Run any doctests defined in this module's docstrings.
    doctest.testmod()
| 616
| 0
|
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
    """GPU integration tests for StableDiffusionKDiffusionPipeline with
    k-diffusion samplers.

    NOTE(review): the source collapsed every local into `snake_case__` (then
    read them back as `sd_pipe`, `image_slice`, … → NameError) and gave all
    four methods the same name; locals and method names are restored here —
    test method names are a best guess.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_euler")
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_euler")
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        # Wider tolerance: this checkpoint is known to drift more across GPUs.
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_dpmpp_2m")
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 303
|
def lowercase__(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    """Return `min_val` when `option` is truthy, otherwise `max_val`.

    Raises AssertionError on wrong argument types and ValueError when
    `min_val > max_val`.
    """
    # Fixes: the original declared three parameters all named `A` (a
    # SyntaxError) while the body used min_val/max_val/option.
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val
def lowercase__(number_1: int, number_2: int) -> int:
    """Return the midpoint of two numbers, truncated toward zero."""
    # Fixes: duplicate parameter names (`A`, `A`) were a SyntaxError, and the
    # body summed the first number with itself instead of averaging both.
    return int((number_1 + number_2) / 2)
def lowercase__(lower: int, higher: int, to_guess: int) -> None:
    """Binary-search "guess the number" game: repeatedly guess the midpoint of
    the current bounds until `to_guess` is found, printing the trail of guesses.

    Raises AssertionError on non-int arguments and ValueError when
    `lower > higher` or `to_guess` is not strictly inside (lower, higher).
    """
    # Fixes: duplicate parameter names (SyntaxError), locals collapsed into one
    # name, and a call to an undefined `get_avg` helper (midpoint inlined).
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")
    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        # Oracle comparing a guess against the hidden value.
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        # Midpoint of the current bounds.
        number = int((last_lowest + last_highest) / 2)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")
def lowercase__( ):
    """Interactive driver: read lower/upper bounds and the value to guess from
    stdin, then run the guessing game."""
    snake_case__ : Tuple = int(input('Enter lower value : ' ).strip() )
    snake_case__ : Optional[Any] = int(input('Enter high value : ' ).strip() )
    snake_case__ : Dict = int(input('Enter value to guess : ' ).strip() )
    # NOTE(review): `guess_the_number` and `A` are undefined in this module
    # (the game function above is also named `lowercase__`, and the three
    # inputs are all bound to `snake_case__`), so this call raises NameError
    # at runtime — the intended names appear to have been lost; TODO confirm.
    guess_the_number(A , A , A )
# NOTE(review): `main` is not defined either — the driver above is named
# `lowercase__` — so the entry point also raises NameError; TODO confirm.
if __name__ == "__main__":
    main()
| 303
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class _lowerCAmelCase ( PipelineTool ):
    """Zero-shot text-classification tool backed by an NLI model: each label is
    scored via "This example is {label}" hypotheses and the most likely label
    is returned.

    NOTE(review): base class, class-attribute names and method names were
    mangled into colliding identifiers; restored here to the `PipelineTool`
    API (setup/encode/decode) — confirm against the base class.
    """

    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification
    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        """Locate the entailment class id in the model config after loading."""
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        # NOTE(review): source spelled this `idalabel`; standard HF configs
        # expose `id2label` — confirm.
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        """Pair `text` with one NLI hypothesis per label and tokenize the batch."""
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        """Pick the label whose hypothesis scored highest on the entailment column."""
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
| 381
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure for the Pix2Struct model family. Submodule names are
# taken from the keys of the import structure below.
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Fixes: the source rebound the whole structure to a bare list here,
    # clobbering the dict above; register the optional entry under its key.
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
    import sys

    # Fixes: the source assigned the _LazyModule to a throwaway name; it must
    # replace this module in sys.modules for lazy attribute access to work.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 381
| 1
|
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
# Fixes: the list was bound to a mangled name while the code below referenced
# `pkgs_to_check_at_runtime`, raising NameError on import.
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append("importlib_metadata")

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def __lowerCAmelCase(pkg, hint=None):
    """Check that the installed version of `pkg` satisfies `deps[pkg]`,
    printing `hint` on failure.

    Fixes: the original declared two parameters both named
    SCREAMING_SNAKE_CASE (a SyntaxError) while the body referenced `pkg` and
    an undefined hint variable.
    """
    require_version(deps[pkg], hint)
| 721
|
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    """POST `message_body` to a Slack incoming-webhook `slack_url`.

    Raises ValueError when Slack answers with a non-200 status code.

    Fixes: duplicate parameter names (SyntaxError), an undefined `headers`
    local, and a function name that did not match the call below.
    """
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        error_message = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(error_message)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
| 531
| 0
|
def and_gate(input_1: int, input_2: int) -> int:
    """Logical AND gate: return 1 iff both inputs are non-zero.

    Fixes: duplicate parameter names (SyntaxError) and both functions sharing
    the name `a__` while the callers below reference `and_gate` and
    `test_and_gate`.
    """
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    """Exhaustively check the AND truth table."""
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
| 175
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
a_ = logging.get_logger(__name__)
# NOTE(review): this second assignment rebinds `a_`, clobbering the logger
# created on the previous line — the two values presumably belong in distinct
# names (logger / pretrained-config archive map); TODO confirm the original
# identifiers.
a_ = {
    """google/canine-s""": """https://huggingface.co/google/canine-s/resolve/main/config.json""",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}
class __lowerCAmelCase ( PretrainedConfig ):
    """Configuration for CANINE models: transformer hyper-parameters plus the
    character-hashing/downsampling settings specific to CANINE.

    Fixes: the base class was an undefined mangled name (`PretrainedConfig`
    is what the file imports), and every `self.` prefix had been stripped
    from the attribute assignments, so instances carried no configuration.
    """

    # NOTE(review): the source held "canine" in a mangled class attribute;
    # `model_type` is the standard PretrainedConfig slot — confirm.
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
| 175
| 1
|
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    """Format `t` (seconds) as h:mm:ss, dropping the hour when it is zero.

    Fixes: the original unpacked h/m/s into a single repeated name (leaving
    `h` and `m` undefined) and its name did not match the `format_time` call
    sites below.
    """
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
def html_progress_bar(value, total, prefix, label, width=300):
    """Return the HTML snippet for a progress bar at `value`/`total`.

    Fixes: the original declared five parameters all named
    _SCREAMING_SNAKE_CASE (a SyntaxError) while the body referenced
    value/total/prefix/label/width; name restored to match call sites.
    """
    # docstyle-ignore
    return f"\n <div>\n {prefix}\n <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>\n {label}\n </div>\n "
def text_to_html_table(items):
    """Render `items` (first row = headers, remaining rows = cells) as an HTML
    table; floats are shown with six decimals.

    Fixes: the body referenced an undefined `items`, the float check was
    `isinstance(x, x)`, and the formatted cell value was discarded.
    """
    html_code = "<table border=\"1\" class=\"dataframe\">\n"
    html_code += """ <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f" <th>{i}</th>\n"
    html_code += " </tr>\n </thead>\n <tbody>\n"
    for line in items[1:]:
        html_code += " <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f" <td>{elt}</td>\n"
        html_code += " </tr>\n"
    html_code += " </tbody>\n</table><p>"
    return html_code
class NotebookProgressBar:
    """An HTML progress bar for Jupyter notebooks, throttled so it redraws at
    most every `update_every` seconds after a `warmup` of unconditional draws.

    Fixes: every `self.` prefix had been stripped from the state assignments
    (so `update` and `update_bar` read attributes that were never set) and
    the class name did not match the `NotebookProgressBar(...)` call sites
    below; both restored.
    """

    # Number of initial calls that always redraw, before throttling kicks in.
    warmup = 5
    # Minimum seconds between redraws once throttled.
    update_every = 0.2

    def __init__(self, total, prefix=None, leave=True, parent=None, width=300):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None

    def update(self, value, force_update=False, comment=None):
        """Advance the bar to `value`, redrawing only when warranted."""
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            # First call: initialize timing state and always draw.
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            # Recompute how many steps to wait before the next redraw.
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)

    def update_bar(self, value, comment=None):
        """Rebuild the textual label ("[ value/total elapsed < remaining, it/s]") and redraw."""
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            self.label = (
                f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
                f" {format_time(self.predicted_remaining)}"
            )
            self.label += f", {1/self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()

    def display(self):
        """Render the bar, delegating to the parent when this is a child bar."""
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        """Blank out the displayed bar (top-level bars only)."""
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(""))
class NotebookTrainingTracker(NotebookProgressBar):
    """A progress bar augmented with a metrics table and an optional child bar
    (used for evaluation loops inside training).

    Fixes: the base class was an undefined mangled name (the code relies on
    NotebookProgressBar's update machinery), `self.` prefixes were stripped,
    and the class name did not match the `NotebookTrainingTracker(...)` call
    site below.
    """

    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        """Render bar + metrics table + (optional) child bar as one HTML blob."""
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        """Append one row (dict column -> value) to the metrics table."""
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        """Attach and return a child progress bar rendered below this one."""
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        """Detach the child bar and redraw without it."""
        self.child_bar = None
        self.display()
class NotebookProgressCallback(TrainerCallback):
    """TrainerCallback that displays training/evaluation progress and a metrics
    table in Jupyter notebooks.

    Fixes: the base class was an undefined mangled name (`TrainerCallback` is
    what the file imports), `self.` prefixes were stripped from all state
    assignments, and the hook methods all shared one mangled name; restored
    to the TrainerCallback hook API.
    """

    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        """Create the tracker with the right first column (Epoch vs Step)."""
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1,
            comment=f"Epoch {epoch}/{state.num_train_epochs}",
            force_update=self._force_next_update,
        )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        """Drive a child bar over the evaluation dataloader."""
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        """Close the prediction bar when a standalone predict finishes."""
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        """Write one table row of train/validation metrics after an evaluation."""
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break
            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            # Drop bookkeeping entries we don't want as table columns.
            _ = metrics.pop("total_flos", None)
            _ = metrics.pop("epoch", None)
            _ = metrics.pop(f"{metric_key_prefix}_runtime", None)
            _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step,
            comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}",
            force_update=True,
        )
        self.training_tracker = None
| 493
|
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenization tests for LayoutLM (WordPiece-based).

    Fixes: the mixin base was an undefined mangled name (`TokenizerTesterMixin`
    is what the file imports), `self.vocab_file` was read but never assigned
    (its `self.` prefix was stripped), and all four methods shared one name so
    unittest/the mixin could never find them.
    NOTE(review): attribute and method names restored to the
    TokenizerTesterMixin API — confirm against the mixin.
    """

    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        # NOTE(review): body was a bare `pass` in the source; the method name
        # here is a guess.
        pass
| 1
|
def naive_cut_rod_recursive(n: int, prices: list):
    """Maximum revenue from cutting a rod of length `n`, by plain exponential
    recursion over every first-cut length.

    Fixes: all five functions shared the name `UpperCamelCase` while their
    bodies called `naive_cut_rod_recursive`, `_top_down_cut_rod_recursive`,
    `_enforce_args` and `main` (names restored from those call sites), and
    the locals had been collapsed so `max_revue`/`max_revenue` were undefined.
    """
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        max_revue = max(
            max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revue


def top_down_cut_rod(n: int, prices: list):
    """Maximum revenue via memoized (top-down) dynamic programming."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    """Recursive helper for `top_down_cut_rod`; `max_rev` memoizes sub-results."""
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
        return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Maximum revenue via bottom-up dynamic programming."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    """Validate the shared arguments; raise ValueError on bad input."""
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    """Sanity-check that all three strategies agree on a known instance."""
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
|
"""simple docstring"""
import argparse
import os
import re
import packaging.version
# Paths and version-bump regexes used by the release utilities below.
# Fixes: all four constants were bound to the same mangled name, so each
# assignment clobbered the previous one, while the functions below read
# `REPLACE_PATTERNS` and `REPLACE_FILES` (names restored from those uses).
# NOTE(review): `PATH_TO_EXAMPLES` / `README_FILE` names are not referenced in
# the visible code — confirm against callers elsewhere.
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
    "init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
    "setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
    "doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Rewrite `fname` in place, substituting `version` using the regex and
    replacement template registered under `pattern` in REPLACE_PATTERNS.

    Fixes: triple duplicate parameter names (SyntaxError), a tuple unpack into
    one repeated name (leaving `re_pattern`/`replace` undefined), and a
    function name that did not match its call sites below.
    """
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """Update the pinned `check_min_version` in every maintained example script.

    Fixes: name restored from the `update_version_in_examples` call site below;
    the walked root was the (mangled) version argument instead of the examples
    directory.
    """
    # Upstream keeps this in a module constant; inlined here for self-containment.
    path_to_examples = "examples/"
    for folder, directories, fnames in os.walk(path_to_examples):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update `version` in all tracked files; examples are skipped for patch
    releases (name and `patch` keyword restored from the call sites below)."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace links to the `main`-branch docs in the README model list with
    links to the stable docs.

    Fixes: both prompt strings were bound to one repeated name (leaving
    `_start_prompt`/`_end_prompt` undefined), the opened file was an undefined
    name, and the edited line was discarded instead of written back.
    """
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    # Upstream keeps this in a module constant; inlined here for self-containment.
    readme_file = "README.md"
    with open(readme_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(readme_file, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """Read and parse the current version from the main `__init__.py`
    (name restored from the `get_version()` call sites below)."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps: compute the next version,
    confirm it with the user, and apply it everywhere
    (name restored from the `pre_release_work(...)` call site below)."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """Do all the necessary post-release steps: bump to the next dev version
    after confirming it with the user
    (name restored from the `post_release_work()` call site below)."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    # Fixes: the parser and parsed namespace were bound to a mangled name
    # while the code below read `parser` and `args` (NameError at runtime).
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
| 482
| 0
|
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:
    class Image:
        """Fallback stub used when PIL is unavailable so this module still imports.

        The tests below only call ``Image.open``, so the stub must be named
        ``Image`` and expose a static ``open`` (the obfuscated version exposed
        neither, making the fallback useless).
        """

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class A__(unittest.TestCase):
    """Tests for `ObjectDetectionPipeline`.

    Restores the canonical local/method names: the obfuscated version assigned
    results to placeholder names while reading the intended names (NameError),
    and gave every method the same name so only the last one survived.
    """

    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self, object_detector, examples):
        # threshold=0.0 so even a randomly initialized model emits detections.
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )

    @require_tf
    @unittest.skip("Object detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_integration_torch_object_detection(self):
        # Same model as above, but loaded through the `pipeline` factory.
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_threshold(self):
        # A high threshold should keep only the two high-confidence cats.
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

    @require_torch
    @require_pytesseract
    @slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)

        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ],
        )
| 69
|
import unittest
from transformers import DonutProcessor
# Checkpoint used by the processor tests below (was assigned to an unused
# placeholder name in the obfuscated version).
DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"


class A__(unittest.TestCase):
    """Tests for `DonutProcessor` token-sequence decoding."""

    def setUp(self):
        # Must be `setUp` (unittest hook) so `self.processor` exists in tests.
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        """A flat + nested Donut tag sequence decodes to the expected dict."""
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }

        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        # `token2json` is the real DonutProcessor API; the obfuscated
        # `tokenajson` does not exist on the class.
        actual_json = self.processor.token2json(sequence)

        self.assertDictEqual(actual_json, expected_json)
| 69
| 1
|
def create_ngram(sentence: str, ngram_size: int) -> list:
    """Return all character n-grams of *sentence* of length *ngram_size*.

    The original `def` repeated the same placeholder parameter name twice
    (a SyntaxError); the body's references fix the intended names.

    >>> create_ngram("I am a sentence", 2)
    ['I ', ' a', 'am', 'm ', ' a', 'a ', ' s', 'se', 'en', 'nt', 'te', 'en', 'nc', 'ce']
    >>> create_ngram("abc", 4)
    []
    """
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
    # Run the doctest examples embedded in this module's docstrings.
    from doctest import testmod

    testmod()
| 413
|
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __SCREAMING_SNAKE_CASE(SchedulerCommonTest):
    """Tests for `DDPMScheduler`.

    Restores the names the test harness requires: the base class must be the
    imported `SchedulerCommonTest`, the class attribute must be
    `scheduler_classes`, and the config builder must be `get_scheduler_config`
    (both are referenced by the methods below). Each test also gets a unique
    `test_*` name so unittest actually runs it.
    """

    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Return a default DDPM config dict, overridable via kwargs."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        # Reference variances at the start, middle and end of the schedule.
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        # 51 after 50 breaks the required descending order.
        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 315
| 0
|
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    """Stub whose `forward` takes the standard inputs contiguously.

    `ensure_valid_input` inspects `forward`'s signature, so the name and
    parameter order matter; the return value does not.
    """

    def forward(self, input_ids, token_type_ids, attention_mask):
        return None
class FuncNonContiguousArgs:
    """Stub whose `forward` interleaves an extra arg among the standard inputs.

    Used by `ensure_valid_input` tests to check argument reordering stops at
    the first parameter that is not provided.
    """

    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None
class OnnxExportTestCase(unittest.TestCase):
    """Tests for `transformers.convert_graph_to_onnx` export helpers.

    The class must be named `OnnxExportTestCase` and the attribute
    `MODEL_TO_TEST` — both are referenced by name inside the test methods.
    """

    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

            with TemporaryDirectory() as bert_save_dir:
                model = BertModel(BertConfig(vocab_size=len(vocab)))
                model.save_pretrained(bert_save_dir)
                self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        """Export *model* to ONNX in a temp dir and return the output path."""
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

                # Remove folder if exists
                if path.parent.exists():
                    path.parent.rmdir()

                # Export
                convert(framework, model, path, opset, tokenizer, **model_kwargs)
                return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_name(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
| 708
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for the Pix2Struct configuration module.
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)

# Map of canonical checkpoint names to their hosted config.json URLs.
UpperCAmelCase_ : Dict = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}
class PixaStructTextConfig(PretrainedConfig):
    """Configuration for the Pix2Struct text (decoder) sub-model.

    Renamed from the obfuscated `__A` because the composite config below
    instantiates `PixaStructTextConfig`. Class attributes restored to the
    `PretrainedConfig` hook names (`model_type`, `keys_to_ignore_at_inference`,
    `attribute_map`), and the `__init__` parameters to the names the body
    assigns from (the obfuscated signature repeated one placeholder name,
    which is a SyntaxError).
    """

    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this text config, unwrapping a composite pix2struct config."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)
class PixaStructVisionConfig(PretrainedConfig):
    """Configuration for the Pix2Struct vision (encoder) sub-model.

    Renamed from the obfuscated `__A` because the composite config below
    instantiates `PixaStructVisionConfig`; parameter names restored from the
    attribute assignments in the body.
    """

    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this vision config, unwrapping a composite pix2struct config."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)
class PixaStructConfig(PretrainedConfig):
    """Composite Pix2Struct configuration holding text + vision sub-configs.

    Renamed from `__A` (three classes shared that name in the obfuscated
    file); the two class attributes are `model_type` and `is_composition`
    per the `PretrainedConfig` convention.
    """

    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = PixaStructTextConfig(**text_config)
        self.vision_config = PixaStructVisionConfig(**vision_config)

        # Mirror the decoder's special token ids on the composite config.
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        # Keep sub-config initializer ranges in sync with the composite one.
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Build a composite config from already-constructed sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding sub-configs into nested dicts."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 367
| 0
|
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
# Module-level logger for the Flax MT5 modeling module.
SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)

# Config class name used in the generated docstrings of the models below.
SCREAMING_SNAKE_CASE : List[Any] = "T5Config"
def lowerCamelCase_(input_ids, pad_token_id, decoder_start_token_id):
    """Shift *input_ids* one position to the right for decoder teacher forcing.

    Position 0 becomes `decoder_start_token_id`, and any label-masking value
    of -100 in the shifted result is replaced by `pad_token_id`. Parameter and
    local names restored: the obfuscated version assigned every intermediate
    to `A_` while reading `shifted_input_ids`, raising NameError.
    """
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
class FlaxMTaModel(FlaxTaModel):
    """Flax MT5 base model: identical to Flax T5 but with the MT5 config.

    Restored the imported base class (the obfuscated base `A` was undefined)
    and the `model_type`/`config_class` hook attributes (both class attrs
    were named `__magic_name__`, so the second silently overwrote the first).
    """

    model_type = "mt5"
    config_class = MTaConfig
class FlaxMTaEncoderModel(FlaxTaEncoderModel):
    """Flax MT5 encoder-only model (T5 encoder with the MT5 config).

    See `FlaxTaEncoderModel` import above; attributes restored to the
    `model_type`/`config_class` hooks.
    """

    model_type = "mt5"
    config_class = MTaConfig
class FlaxMTaForConditionalGeneration(FlaxTaForConditionalGeneration):
    """Flax MT5 seq2seq LM head model (T5 generation model, MT5 config).

    Attributes restored to the `model_type`/`config_class` hooks.
    """

    model_type = "mt5"
    config_class = MTaConfig
| 141
|
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class __lowercase ( A , unittest.TestCase ):
__magic_name__ : Any = FlaxAutoencoderKL
@property
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = 4
A_ = 3
A_ = (3_2, 3_2)
A_ = jax.random.PRNGKey(0 )
A_ = jax.random.uniform(a__ , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def lowerCAmelCase_ ( self ) -> Any:
'''simple docstring'''
A_ = {
'''block_out_channels''': [3_2, 6_4],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
A_ = self.dummy_input
return init_dict, inputs_dict
| 141
| 1
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import table: module path -> public names it provides.
# NOTE(review): the transcription assigned the dict, the list and the lazy
# module to one reused placeholder and then referenced the undefined
# `_import_structure`; the standard lazy-init wiring is restored here.
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

# Only expose the modeling objects when torch is actually installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 629
|
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    """Map one original EfficientFormer state-dict key to the HF naming scheme.

    `num_meta4D_last_stage` is the number of meta-4D blocks in the final stage;
    block indices at or beyond it belong to the meta-3D (attention) blocks.

    NOTE(review): the transcribed version had duplicate parameter names and
    assigned every intermediate to a throwaway name while reading the real
    names (`layer`, `match`, `trimmed_name`, ...); both are restored here.
    """
    new_name = old_name

    if "patch_embed" in old_name:
        # Keys look like "patch_embed.<layer>.<param>".
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        # Two-digit block indices need the wider "d.dd." match.
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                # Indices past the meta-4D blocks are re-based into meta-3D blocks.
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)

                if "norm1" in old_name:
                    trimmed_name = trimmed_name.replace("norm1", "layernorm1")
                elif "norm2" in old_name:
                    trimmed_name = trimmed_name.replace("norm2", "layernorm2")
                elif "fc1" in old_name:
                    trimmed_name = trimmed_name.replace("fc1", "linear_in")
                elif "fc2" in old_name:
                    trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    """Rename every key of `checkpoint` in place to the HF naming scheme.

    NOTE(review): the transcribed body popped each key and bound the value to
    a throwaway local, silently dropping it; re-inserting the value under the
    renamed key is the only behavior that leaves the checkpoint loadable.
    The function name itself is restored from the call site further down the
    file (`convert_torch_checkpoint(...)`).
    """
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint
def prepare_img():
    """Download the standard COCO cats image used for conversion sanity checks.

    NOTE(review): `stream=` was bound to an undefined name in the
    transcription; it must be True so PIL can read straight from the raw
    response body. The function name is restored from the `prepare_img()`
    call site further down the file.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    """Convert an original EfficientFormer checkpoint to the HF format.

    Loads the raw state dict, renames its keys, sanity-checks the logits of a
    reference image against hard-coded expected values, saves the converted
    model + processor, and optionally pushes both to the Hub.

    NOTE(review): the transcribed signature reused one parameter name four
    times (a SyntaxError); the names below are restored from the keyword
    arguments used at the `__main__` call site.
    """
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    # e.g. ".../efficientformer_l1_300d.pth" -> "efficientformer_l1"
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    # Blocks at or beyond this index in the last stage are meta-3D blocks.
    # NOTE(review): the config attribute was garbled in transcription
    # ("num_metaad_blocks"); `num_meta3d_blocks` is assumed — confirm.
    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline — must agree with the HF processor output
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfuly saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    # NOTE(review): the transcription assigned the parser and the parsed args
    # to placeholder names while reading `parser`/`args`; both restored.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path",
        default=None,
        type=str,
        required=True,
        help="Path to EfficientFormer pytorch checkpoint.",
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for EfficientFormer model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )

    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )
    parser.set_defaults(push_to_hub=True)

    args = parser.parse_args()
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
| 629
| 1
|
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
# NOTE(review): the two path constants and the import spec were transcribed
# under one reused placeholder (with `spec` then read undefined); the
# conventional names for this check-copies preamble are restored.
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def lowerCamelCase__ ( __lowercase , __lowercase ):
return line.startswith(__lowercase ) or len(__lowercase ) <= 1 or re.search(r"""^\s*\)(\s*->.*:|:)\s*$""" , __lowercase ) is not None
def find_code_in_diffusers(object_name):
    """Return the source of `object_name` (dotted path) inside `src/diffusers`.

    Walks the dotted path until a matching ``.py`` file is found, then scans
    that file for the nested class/def chain and returns its source text.
    Name and locals restored from the call sites below (the transcription
    bound every value to placeholders and used the parameter where
    `DIFFUSERS_PATH` belongs).
    """
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        # Each nesting level adds one indentation step.
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
lowercase : Union[str, Any] = re.compile(R"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""")
lowercase : Tuple = re.compile(R"""^\s*(\S+)->(\S+)(\s+.*|$)""")
lowercase : List[Any] = re.compile(R"""<FILL\s+[^>]*>""")
def get_indent(code):
    """Return the leading whitespace of the first non-empty line of `code`.

    Name restored from the `get_indent(...)` call sites in this file; the
    transcribed body split into a placeholder while reading `lines`.
    """
    lines = code.split("\n")
    idx = 0
    # Skip leading empty lines.
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    """Format a code snippet with black and restyle its docstrings.

    Indented snippets are temporarily wrapped in a dummy `class Bla:` so black
    accepts them, then unwrapped. Name restored from the `blackify(...)` call
    site below; the transcription bound every value to a placeholder and left
    an invalid `black.TargetVersion.PYaa` attribute (PY37 assumed — confirm
    against the repo's black configuration).
    """
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """Check that every `# Copied from` block in `filename` matches its source.

    Returns a list of `[object_name, start_index]` for each mismatch; when
    `overwrite` is True, rewrites the file with the theoretical code instead.
    NOTE(review): locals restored from their read sites (the transcription
    bound everything to placeholders) and the filename restored into the
    rewrite message.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    # Apply the replacement in lower and upper case too.
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

        # Blackify after replacement. To be able to do that, we need the header (class or function definition)
        # from the previous line
        theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
        theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite=False):
    """Run `is_copy_consistent` on every Python file in the repository.

    Raises with a summary of all inconsistencies unless `overwrite` is True,
    in which case the files are fixed in place. Name restored from the
    `check_copies(args.fix_and_overwrite)` call below; the transcription used
    the parameter where `REPO_PATH` / `recursive=True` belong and dropped the
    filename from the diff messages.
    """
    all_files = glob.glob(os.path.join(REPO_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
    # NOTE(review): the parser was transcribed under a placeholder name while
    # `parser`/`args` were read undefined; both restored.
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
| 116
|
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
lowercase : Tuple = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    """Dataclass helper: a list-valued field whose default is built per instance.

    Wrapping the default in a ``default_factory`` lambda avoids the shared
    mutable-default pitfall. Name restored from the many `list_field(...)`
    call sites below; the transcribed signature reused one name for both
    parameters (a SyntaxError).
    """
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    """Four required fields of the four basic CLI types.

    NOTE(review): class and field names restored from the parser tests below
    (`--foo/--bar/--baz/--flag`, `BasicExample(**args_dict)`); the
    transcription collapsed every field to one shared placeholder name.
    """

    foo: int
    bar: float
    baz: str
    flag: bool
@dataclass
class WithDefaultExample:
    """Two defaulted fields, one carrying argparse help metadata.

    NOTE(review): names restored from the corresponding parser test below;
    the transcription collapsed both fields to one shared placeholder name.
    """

    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})
@dataclass
class WithDefaultBoolExample:
    """Boolean fields with False/True/None defaults for bool-parsing tests.

    NOTE(review): names restored from the `Namespace(foo=..., baz=..., opt=...)`
    assertions in the test class below; the transcription collapsed all
    fields to one shared placeholder name.
    """

    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None
class BasicEnum(Enum):
    """Two-member string enum used as a CLI choice set.

    NOTE(review): class name restored from the `BasicEnum(self.foo)` style
    references below; the base was a placeholder and is assumed to be `Enum`
    per the `from enum import Enum` at the top of this fragment.
    """

    titi = "titi"
    toto = "toto"
class MixedTypeEnum(Enum):
    """Enum mixing string and int values for choice-type coercion tests.

    NOTE(review): member names restored from the `MixedTypeEnum.titi/.toto/
    .fourtytwo` references in the test class below; the transcription
    collapsed all members to one shared placeholder name.
    """

    titi = "titi"
    toto = "toto"
    fourtytwo = 42
@dataclass
class EnumExample:
    """Dataclass whose field is coerced into `BasicEnum` after init.

    NOTE(review): the coercion method was transcribed under a placeholder
    name and assigned to a throwaway local; `__post_init__` writing back to
    `self.foo` is assumed from standard dataclass usage — confirm.
    """

    foo: BasicEnum = "toto"

    def __post_init__(self):
        # Coerce the raw CLI string into the enum member.
        self.foo = BasicEnum(self.foo)
@dataclass
class MixedTypeEnumExample:
    """Dataclass whose field is coerced into `MixedTypeEnum` after init.

    NOTE(review): the coercion method was transcribed under a placeholder
    name and assigned to a throwaway local; `__post_init__` writing back to
    `self.foo` is assumed from standard dataclass usage — confirm.
    """

    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        # Coerce the raw CLI value into the enum member.
        self.foo = MixedTypeEnum(self.foo)
@dataclass
class OptionalExample:
    """All-optional fields, including optional list fields via `list_field`.

    NOTE(review): names restored from the `Namespace(foo=..., bar=..., baz=...,
    ces=[...], des=[...])` assertions in the test class below; the
    transcription collapsed every field to one shared placeholder name.
    """

    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])
@dataclass
class ListExample:
    """List-typed fields with empty and non-empty defaults.

    NOTE(review): names restored from the `--foo_int/--bar_int/--foo_str/
    --foo_float` arguments asserted in the test class below; the
    transcription collapsed every field to one shared placeholder name.
    """

    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class RequiredExample:
    """Fields without defaults, so the generated CLI arguments are required.

    NOTE(review): names restored from the `--required_list/--required_str/
    --required_enum` arguments in the test class below and the body's own
    `self.required_enum` read; `__post_init__` writing back to the field is
    assumed from standard dataclass usage — confirm.
    """

    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        # Coerce the raw CLI string into the enum member.
        self.required_enum = BasicEnum(self.required_enum)
@dataclass
class StringLiteralAnnotationExample:
    """Fields annotated with *string* type hints (forward references).

    NOTE(review): field names restored from the `--foo/--required_enum/--opt/
    --baz/--foo_str` arguments asserted in the test class below; the
    transcription collapsed every field to one shared placeholder name.
    """

    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])
if is_python_no_less_than_3_10:
    # PEP 604 (`X | None`) variants of the examples above; the `|` syntax in
    # annotations is only valid at class-creation time on Python >= 3.10,
    # hence the version guard.
    # NOTE(review): class and field names restored from their append sites in
    # the test class below (`dataclass_types.append(...)`); the transcription
    # collapsed every field to one shared placeholder name.
    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    """End-to-end tests for `HfArgumentParser` built from the dataclasses above.

    NOTE(review): every method of this class was transcribed under one shared
    placeholder name (so only the last survived and none were discoverable as
    tests), and locals were bound to throwaway names while read under their
    real ones. Method/local names are reconstructed from each body's own
    reads (`parser`, `expected`, `args`, ...).
    """

    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """Assert pseudo-equality of two `ArgumentParser` instances."""
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)

    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)

    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))

    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)

    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))

    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)

            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))

    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)

    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)

    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            # NOTE(review): the transcribed version called `parse_yaml_file`
            # on a .json payload; the JSON parser is clearly intended here.
            parsed_args = parser.parse_json_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]

        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
| 116
| 1
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase_(ProcessorMixin):
    """CLAP processor: bundles a Roberta tokenizer and a CLAP feature extractor.

    NOTE(review): the base class, the two required `ProcessorMixin` class
    attributes, the method names and the key locals were all transcribed
    under shared placeholders (so methods shadowed each other and `encoding`
    / `audio_features` were read undefined); the standard processor layout
    is restored below.
    """

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or featurize `audios`; merge when both given."""
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            # Fold the audio features into the text encoding.
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Union (order-preserving, de-duplicated) of both components' input names."""
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
| 712
|
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
UpperCamelCase = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
UpperCamelCase = typing.Union[np.floataa, int, float] # noqa: UP007
def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Return the Euclidean distance between two vectors, computed with NumPy."""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Return the Euclidean distance between two vectors in pure Python."""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":
    # Both functions were previously defined under the same name (the second def
    # shadowed the first) and the benchmark referenced names that did not exist.

    def benchmark() -> None:
        """Time both implementations on a small example input."""
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )

    benchmark()
| 677
| 0
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "pytorch",
            "script": "run_ddp.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf_dist.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
        },
    ]
)
class UpperCamelCase(unittest.TestCase):
    """Multi-node data-parallel SageMaker training test, run once per parameterized config.

    The four methods were all defined under one placeholder name and shadowed each
    other; their real names are restored (grounded by the `self.create_estimator`
    call site and unittest's setUp convention).
    """

    def setUp(self):
        # The glue training script only needs to be staged for the pytorch configs.
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        """Build a HuggingFace SageMaker estimator for the current config."""
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings: smdistributed dataparallel unless the script is plain DDP
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        """Export the job's CloudWatch metrics to a csv next to the test artifacts."""
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator and run training
        estimator = self.create_estimator(instance_count)
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 371
|
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    """Unit tests for the PriorTransformer model.

    All methods were previously defined under one placeholder name (shadowing each
    other); real names restored based on the in-class references
    (`self.dummy_input`, `self.get_dummy_seed_input`,
    `self.prepare_init_args_and_inputs_for_common`).
    """

    model_class = PriorTransformer
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        # Small random inputs matching the init config below (embedding_dim=8, 7 embeddings).
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def get_dummy_seed_input(self, seed=0):
        # Deterministic inputs: seed before each randn so values are reproducible.
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]

        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature(self):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()

        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)

    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)

        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()

        input = self.get_dummy_seed_input()

        with torch.no_grad():
            output = model(**input)[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    """Slow integration tests against the Kandinsky 2.1 prior checkpoint."""

    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        # Deterministic random inputs sized for the real checkpoint (dim 768, 77 embeddings).
        torch.manual_seed(seed)
        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ]
    )
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        input = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**input)[0]

        assert list(sample.shape) == [1, 768]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
| 371
| 1
|
"""simple docstring"""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_lowerCamelCase = '''
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415
},
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_lowerCamelCase = '''\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
'''
_lowerCamelCase = '''
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=["About 95 species are currently accepted ."]
>>> predictions=["About 95 you now get in ."]
>>> references=[["About 95 species are currently known ."]]
>>> wiki_split = datasets.load_metric("wiki_split")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}
'''
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace.

    (The previous versions' nested helpers referenced an undefined `text`
    variable and `re.sub` was called with the input string as the pattern;
    the three defs also shared one name and shadowed each other.)
    """

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def compute_exact(a_gold, a_pred):
    """Return 1 if the normalized prediction equals the normalized gold answer, else 0."""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    """Return the percentage (0-100) of predictions that exactly match any reference."""
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def lowerCAmelCase_ ( lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE : List[Any] = [rgram for rgrams in rgramslist for rgram in rgrams]
__SCREAMING_SNAKE_CASE : str = Counter(lowercase_ )
__SCREAMING_SNAKE_CASE : str = Counter(lowercase_ )
__SCREAMING_SNAKE_CASE : Tuple = Counter()
for sgram, scount in sgramcounter.items():
__SCREAMING_SNAKE_CASE : Any = scount * numref
__SCREAMING_SNAKE_CASE : Union[str, Any] = Counter(lowercase_ )
__SCREAMING_SNAKE_CASE : str = Counter()
for cgram, ccount in cgramcounter.items():
__SCREAMING_SNAKE_CASE : Optional[Any] = ccount * numref
# KEEP
__SCREAMING_SNAKE_CASE : Optional[int] = sgramcounter_rep & cgramcounter_rep
__SCREAMING_SNAKE_CASE : Optional[int] = keepgramcounter_rep & rgramcounter
__SCREAMING_SNAKE_CASE : Any = sgramcounter_rep & rgramcounter
__SCREAMING_SNAKE_CASE : Optional[int] = 0
__SCREAMING_SNAKE_CASE : List[str] = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
__SCREAMING_SNAKE_CASE : Union[str, Any] = 1
__SCREAMING_SNAKE_CASE : List[Any] = 1
if len(lowercase_ ) > 0:
__SCREAMING_SNAKE_CASE : Any = keeptmpscorea / len(lowercase_ )
if len(lowercase_ ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
__SCREAMING_SNAKE_CASE : int = keeptmpscorea / sum(keepgramcounterall_rep.values() )
__SCREAMING_SNAKE_CASE : Tuple = 0
if keepscore_precision > 0 or keepscore_recall > 0:
__SCREAMING_SNAKE_CASE : Optional[Any] = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
__SCREAMING_SNAKE_CASE : int = sgramcounter_rep - cgramcounter_rep
__SCREAMING_SNAKE_CASE : Union[str, Any] = delgramcounter_rep - rgramcounter
__SCREAMING_SNAKE_CASE : Optional[Any] = sgramcounter_rep - rgramcounter
__SCREAMING_SNAKE_CASE : Any = 0
__SCREAMING_SNAKE_CASE : Dict = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
__SCREAMING_SNAKE_CASE : Dict = 1
if len(lowercase_ ) > 0:
__SCREAMING_SNAKE_CASE : Any = deltmpscorea / len(lowercase_ )
# ADDITION
__SCREAMING_SNAKE_CASE : Dict = set(lowercase_ ) - set(lowercase_ )
__SCREAMING_SNAKE_CASE : List[str] = set(lowercase_ ) & set(lowercase_ )
__SCREAMING_SNAKE_CASE : Optional[int] = set(lowercase_ ) - set(lowercase_ )
__SCREAMING_SNAKE_CASE : Tuple = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
__SCREAMING_SNAKE_CASE : Optional[int] = 1
__SCREAMING_SNAKE_CASE : int = 1
if len(lowercase_ ) > 0:
__SCREAMING_SNAKE_CASE : Tuple = addtmpscore / len(lowercase_ )
if len(lowercase_ ) > 0:
__SCREAMING_SNAKE_CASE : Optional[Any] = addtmpscore / len(lowercase_ )
__SCREAMING_SNAKE_CASE : List[Any] = 0
if addscore_precision > 0 or addscore_recall > 0:
__SCREAMING_SNAKE_CASE : Dict = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def lowerCAmelCase_ ( lowercase_ : Any , lowercase_ : str , lowercase_ : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE : str = len(lowercase_ )
__SCREAMING_SNAKE_CASE : int = ssent.split(''' ''' )
__SCREAMING_SNAKE_CASE : List[str] = csent.split(''' ''' )
__SCREAMING_SNAKE_CASE : Dict = []
__SCREAMING_SNAKE_CASE : Optional[Any] = []
__SCREAMING_SNAKE_CASE : List[str] = []
__SCREAMING_SNAKE_CASE : Any = []
__SCREAMING_SNAKE_CASE : Tuple = []
__SCREAMING_SNAKE_CASE : List[Any] = []
__SCREAMING_SNAKE_CASE : Any = []
__SCREAMING_SNAKE_CASE : Any = []
__SCREAMING_SNAKE_CASE : Any = []
__SCREAMING_SNAKE_CASE : Any = []
for rsent in rsents:
__SCREAMING_SNAKE_CASE : str = rsent.split(''' ''' )
__SCREAMING_SNAKE_CASE : Any = []
__SCREAMING_SNAKE_CASE : Optional[Any] = []
__SCREAMING_SNAKE_CASE : Any = []
ragramslist.append(lowercase_ )
for i in range(0 , len(lowercase_ ) - 1 ):
if i < len(lowercase_ ) - 1:
__SCREAMING_SNAKE_CASE : Optional[int] = ragrams[i] + ''' ''' + ragrams[i + 1]
ragrams.append(lowercase_ )
if i < len(lowercase_ ) - 2:
__SCREAMING_SNAKE_CASE : Dict = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2]
ragrams.append(lowercase_ )
if i < len(lowercase_ ) - 3:
__SCREAMING_SNAKE_CASE : List[str] = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3]
ragrams.append(lowercase_ )
ragramslist.append(lowercase_ )
ragramslist.append(lowercase_ )
ragramslist.append(lowercase_ )
for i in range(0 , len(lowercase_ ) - 1 ):
if i < len(lowercase_ ) - 1:
__SCREAMING_SNAKE_CASE : Union[str, Any] = sagrams[i] + ''' ''' + sagrams[i + 1]
sagrams.append(lowercase_ )
if i < len(lowercase_ ) - 2:
__SCREAMING_SNAKE_CASE : Union[str, Any] = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2]
sagrams.append(lowercase_ )
if i < len(lowercase_ ) - 3:
__SCREAMING_SNAKE_CASE : int = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3]
sagrams.append(lowercase_ )
for i in range(0 , len(lowercase_ ) - 1 ):
if i < len(lowercase_ ) - 1:
__SCREAMING_SNAKE_CASE : List[str] = cagrams[i] + ''' ''' + cagrams[i + 1]
cagrams.append(lowercase_ )
if i < len(lowercase_ ) - 2:
__SCREAMING_SNAKE_CASE : Any = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2]
cagrams.append(lowercase_ )
if i < len(lowercase_ ) - 3:
__SCREAMING_SNAKE_CASE : Any = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3]
cagrams.append(lowercase_ )
(__SCREAMING_SNAKE_CASE) : int = SARIngram(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
(__SCREAMING_SNAKE_CASE) : Optional[int] = SARIngram(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
(__SCREAMING_SNAKE_CASE) : int = SARIngram(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
(__SCREAMING_SNAKE_CASE) : Tuple = SARIngram(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
__SCREAMING_SNAKE_CASE : int = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
__SCREAMING_SNAKE_CASE : List[str] = sum([delascore, delascore, delascore, delascore] ) / 4
__SCREAMING_SNAKE_CASE : int = sum([addascore, addascore, addascore, addascore] ) / 4
__SCREAMING_SNAKE_CASE : List[Any] = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    """Lowercase and tokenize a sentence for SARI/BLEU scoring.

    Args:
        sentence: input sentence.
        lowercase: lowercase the sentence before tokenizing.
        tokenizer: "13a"/"intl" (sacrebleu), "moses"/"penn" (sacremoses), or
            anything else to leave the sentence untokenized.
        return_str: return a string when True, else a list of tokens.
    """
    # Normalization is requried for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence.
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    """Return the corpus-level SARI score (0-100), averaged over sentences.

    Raises:
        ValueError: if sources/predictions/references lengths differ.
    """
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        # Each reference sentence is normalized individually (the previous
        # version normalized the wrong variable for the references).
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    """Return the sacrebleu corpus BLEU score for the given predictions/references.

    Raises:
        ValueError: if predictions have differing numbers of references.
    """
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    # sacrebleu expects references transposed: one list per reference position.
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class snake_case(datasets.Metric):
    """WikiSplit metric: combines SARI, sacrebleu and exact-match scores.

    The two methods were previously defined under one placeholder name and
    shadowed each other; `datasets.Metric` requires them to be named
    `_info` and `_compute`.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        # Aggregate the three component scores into a single result dict.
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
| 700
|
"""simple docstring"""
import torch
from diffusers import StableDiffusionPipeline
# Path to the DreamBooth fine-tuned model directory — edit before running.
model_id = "path-to-your-trained-model"
# `torch.floataa` does not exist; fp16 halves memory and requires a CUDA device.
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
| 401
| 0
|
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''',
datefmt='''%Y-%m-%d %H:%M:%S''',
level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(),
stream=sys.stdout,
)
A__: List[str] = logging.getLogger(__name__)
A__: Optional[int] = {'''facebook/bart-base''': BartForConditionalGeneration}
A__: Tuple = {'''facebook/bart-base''': BartTokenizer}
def parse_args():
    """Parse command-line arguments for the BART-to-ONNX export script.

    Returns the parsed `argparse.Namespace`. (The previous version used an
    undefined placeholder for every `type=`/`default=` value and could not run.)
    """
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name, device="cpu"):
    """Load the HF model and tokenizer for `model_name` and move the model to `device`.

    For bart-base, generation-constraining config values are zeroed out so the
    scripted beam search is not affected by them. (The previous version assigned
    these to discarded placeholder variables instead of the model config.)
    """
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    """Export BART + beam search to ONNX and check parity against the PyTorch output.

    Runs `model.generate` on a fixed example, exports the scripted beam-search
    wrapper, deduplicates initializers, then compares ONNX Runtime output with
    the PyTorch summary ids within a small tolerance.
    """
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        # Shrink the exported graph by removing duplicated initializers.
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    """Entry point: parse args, load model/tokenizer, export and validate ONNX.

    (Previously this function's name was collapsed by mangling, so the
    `main()` call in the entry guard raised NameError, and the parsed args
    and derived values were bound to discarded placeholder names.)
    """
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams

    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)


if __name__ == "__main__":
    main()
| 380
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
# All four maps were bound to one duplicated placeholder name (each assignment
# shadowing the previous), while the tokenizer class below references the real
# constant names. Names restored; contents unchanged.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Download URLs for each pretrained Electra checkpoint's vocab/tokenizer files.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
        ),
        "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-generator": (
            "https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum input lengths supported by each checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/electra-small-generator": 512,
    "google/electra-base-generator": 512,
    "google/electra-large-generator": 512,
    "google/electra-small-discriminator": 512,
    "google/electra-base-discriminator": 512,
    "google/electra-large-discriminator": 512,
}

# Default tokenizer init options per checkpoint.
PRETRAINED_INIT_CONFIGURATION = {
    "google/electra-small-generator": {"do_lower_case": True},
    "google/electra-base-generator": {"do_lower_case": True},
    "google/electra-large-generator": {"do_lower_case": True},
    "google/electra-small-discriminator": {"do_lower_case": True},
    "google/electra-base-discriminator": {"do_lower_case": True},
    "google/electra-large-discriminator": {"do_lower_case": True},
}
class _a ( UpperCamelCase__):
    """Fast ELECTRA tokenizer (backed by HuggingFace *tokenizers*), WordPiece-based.

    NOTE(review): the obfuscated original reused the parameter name
    ``__lowerCamelCase`` for every argument (a SyntaxError) and referenced
    undefined names in the method bodies; coherent names are restored here.
    The three method names and the base-class name are kept as-is so any
    external references are unaffected.  The three identical method names
    shadow each other -- only the last definition survives at runtime.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        """Build the fast tokenizer and re-sync the backend normalizer options."""
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # If the serialized normalizer disagrees with the requested options,
        # rebuild it with the values passed to this constructor.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            # `normalizers` is expected to be imported from the `tokenizers`
            # package at the top of the file (outside this chunk) -- TODO confirm.
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def UpperCAmelCase_(self, token_ids_0, token_ids_1=None):
        """Build model inputs by adding the [CLS]/[SEP] special tokens."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def UpperCAmelCase_(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """Create token-type ids: 0 for the first segment, 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def UpperCAmelCase_(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Save the WordPiece vocabulary and return the written file paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 380
| 1
|
import pickle
import numpy as np
from matplotlib import pyplot as plt
class _snake_case :
    # Simple from-scratch CNN: one convolution layer, one pooling layer and a
    # two-layer fully connected BP network, all built on numpy matrices.
    #
    # NOTE(review): this block was machine-obfuscated -- every assignment
    # target became ``SCREAMING_SNAKE_CASE`` while later lines still read the
    # original names (``bp_numa``, ``data_focus``, ...), several defs repeat
    # the parameter name ``a`` (a SyntaxError), and calls pass the undefined
    # ``lowerCamelCase_``.  The code cannot run as written; the comments below
    # document the evident intent only, and the code is left untouched.

    # Intended signature (per upstream usage): __init__(self, conv1_get,
    # size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2) -- TODO confirm.
    def __init__( self , a , a , a , a , a , a=0.2 , a=0.2) -> Any:
        SCREAMING_SNAKE_CASE = bp_numa
        SCREAMING_SNAKE_CASE = bp_numa
        SCREAMING_SNAKE_CASE = bp_numa
        SCREAMING_SNAKE_CASE = conva_get[:2]
        SCREAMING_SNAKE_CASE = conva_get[2]
        SCREAMING_SNAKE_CASE = size_pa
        SCREAMING_SNAKE_CASE = rate_w
        SCREAMING_SNAKE_CASE = rate_t
        # Random initialisation: kernels/weights in [-0.5, 0.5), thresholds in [-1, 1).
        SCREAMING_SNAKE_CASE = [
            np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0]) + 0.5)
            for i in range(self.conva[1])
        ]
        SCREAMING_SNAKE_CASE = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
        SCREAMING_SNAKE_CASE = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
        SCREAMING_SNAKE_CASE = -2 * np.random.rand(self.conva[1]) + 1
        SCREAMING_SNAKE_CASE = -2 * np.random.rand(self.num_bpa) + 1
        SCREAMING_SNAKE_CASE = -2 * np.random.rand(self.num_bpa) + 1

    # Serialise hyper-parameters and learned weights to a pickle file.
    def SCREAMING_SNAKE_CASE__ ( self , a) -> Dict:
        SCREAMING_SNAKE_CASE = {
            '''num_bp1''': self.num_bpa,
            '''num_bp2''': self.num_bpa,
            '''num_bp3''': self.num_bpa,
            '''conv1''': self.conva,
            '''step_conv1''': self.step_conva,
            '''size_pooling1''': self.size_poolinga,
            '''rate_weight''': self.rate_weight,
            '''rate_thre''': self.rate_thre,
            '''w_conv1''': self.w_conva,
            '''wkj''': self.wkj,
            '''vji''': self.vji,
            '''thre_conv1''': self.thre_conva,
            '''thre_bp2''': self.thre_bpa,
            '''thre_bp3''': self.thre_bpa,
        }
        with open(lowerCamelCase_ , 'wb') as f:
            pickle.dump(lowerCamelCase_ , lowerCamelCase_)
        print(f'''Model saved: {save_path}''')

    # Rebuild a CNN instance from a pickled model file (inverse of save above).
    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls , a) -> Tuple:
        with open(lowerCamelCase_ , 'rb') as f:
            SCREAMING_SNAKE_CASE = pickle.load(lowerCamelCase_) # noqa: S301
        SCREAMING_SNAKE_CASE = model_dic.get('conv1')
        conv_get.append(model_dic.get('step_conv1'))
        SCREAMING_SNAKE_CASE = model_dic.get('size_pooling1')
        SCREAMING_SNAKE_CASE = model_dic.get('num_bp1')
        SCREAMING_SNAKE_CASE = model_dic.get('num_bp2')
        SCREAMING_SNAKE_CASE = model_dic.get('num_bp3')
        SCREAMING_SNAKE_CASE = model_dic.get('rate_weight')
        SCREAMING_SNAKE_CASE = model_dic.get('rate_thre')
        # create model instance
        SCREAMING_SNAKE_CASE = CNN(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
        # modify model parameter
        SCREAMING_SNAKE_CASE = model_dic.get('w_conv1')
        SCREAMING_SNAKE_CASE = model_dic.get('wkj')
        SCREAMING_SNAKE_CASE = model_dic.get('vji')
        SCREAMING_SNAKE_CASE = model_dic.get('thre_conv1')
        SCREAMING_SNAKE_CASE = model_dic.get('thre_bp2')
        SCREAMING_SNAKE_CASE = model_dic.get('thre_bp3')
        return conv_ins

    # Logistic sigmoid activation.
    def SCREAMING_SNAKE_CASE__ ( self , a) -> Optional[Any]:
        return 1 / (1 + np.exp(-1 * x))

    # Round to three decimals (used when emitting predictions).
    def SCREAMING_SNAKE_CASE__ ( self , a) -> List[Any]:
        return round(lowerCamelCase_ , 3)

    # Convolve ``data`` with every kernel (bias per kernel); returns the
    # flattened input windows and the per-kernel feature maps.
    def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a) -> str:
        SCREAMING_SNAKE_CASE = convs[0]
        SCREAMING_SNAKE_CASE = convs[1]
        SCREAMING_SNAKE_CASE = np.shape(lowerCamelCase_)[0]
        # get the data slice of original image data, data_focus
        SCREAMING_SNAKE_CASE = []
        for i_focus in range(0 , size_data - size_conv + 1 , lowerCamelCase_):
            for j_focus in range(0 , size_data - size_conv + 1 , lowerCamelCase_):
                SCREAMING_SNAKE_CASE = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(lowerCamelCase_)
        # calculate the feature map of every single kernel, and saved as list of matrix
        SCREAMING_SNAKE_CASE = []
        SCREAMING_SNAKE_CASE = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(lowerCamelCase_):
            SCREAMING_SNAKE_CASE = []
            for i_focus in range(len(lowerCamelCase_)):
                SCREAMING_SNAKE_CASE = (
                    np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(lowerCamelCase_))
            SCREAMING_SNAKE_CASE = np.asmatrix(lowerCamelCase_).reshape(
                lowerCamelCase_ , lowerCamelCase_)
            data_featuremap.append(lowerCamelCase_)
        # expanding the data slice to One dimenssion
        SCREAMING_SNAKE_CASE = []
        for each_focus in data_focus:
            focusa_list.extend(self.Expand_Mat(lowerCamelCase_))
        SCREAMING_SNAKE_CASE = np.asarray(lowerCamelCase_)
        return focus_list, data_featuremap

    # Average- or max-pool every feature map down by ``size_pooling``.
    def SCREAMING_SNAKE_CASE__ ( self , a , a , a="average_pool") -> str:
        SCREAMING_SNAKE_CASE = len(featuremaps[0])
        SCREAMING_SNAKE_CASE = int(size_map / size_pooling)
        SCREAMING_SNAKE_CASE = []
        for i_map in range(len(lowerCamelCase_)):
            SCREAMING_SNAKE_CASE = featuremaps[i_map]
            SCREAMING_SNAKE_CASE = []
            for i_focus in range(0 , lowerCamelCase_ , lowerCamelCase_):
                for j_focus in range(0 , lowerCamelCase_ , lowerCamelCase_):
                    SCREAMING_SNAKE_CASE = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(lowerCamelCase_))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(lowerCamelCase_))
            SCREAMING_SNAKE_CASE = np.asmatrix(lowerCamelCase_).reshape(lowerCamelCase_ , lowerCamelCase_)
            featuremap_pooled.append(lowerCamelCase_)
        return featuremap_pooled

    # Flatten a list of matrices into one 1-D numpy array.
    def SCREAMING_SNAKE_CASE__ ( self , a) -> List[Any]:
        SCREAMING_SNAKE_CASE = []
        for i in range(len(lowerCamelCase_)):
            SCREAMING_SNAKE_CASE = np.shape(data[i])
            SCREAMING_SNAKE_CASE = data[i].reshape(1 , shapes[0] * shapes[1])
            SCREAMING_SNAKE_CASE = data_listed.getA().tolist()[0]
            data_expanded.extend(lowerCamelCase_)
        SCREAMING_SNAKE_CASE = np.asarray(lowerCamelCase_)
        return data_expanded

    # Flatten a single matrix into a 1 x N row vector.
    def SCREAMING_SNAKE_CASE__ ( self , a) -> Optional[int]:
        SCREAMING_SNAKE_CASE = np.asarray(lowerCamelCase_)
        SCREAMING_SNAKE_CASE = np.shape(lowerCamelCase_)
        SCREAMING_SNAKE_CASE = data_mat.reshape(1 , shapes[0] * shapes[1])
        return data_expanded

    # Up-sample the pooled gradient back to feature-map size and apply the
    # sigmoid derivative (backprop through the pooling layer).
    def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a) -> Optional[int]:
        SCREAMING_SNAKE_CASE = []
        SCREAMING_SNAKE_CASE = 0
        for i_map in range(lowerCamelCase_):
            SCREAMING_SNAKE_CASE = np.ones((size_map, size_map))
            for i in range(0 , lowerCamelCase_ , lowerCamelCase_):
                for j in range(0 , lowerCamelCase_ , lowerCamelCase_):
                    SCREAMING_SNAKE_CASE = pd_pool[
                        i_pool
                    ]
                    SCREAMING_SNAKE_CASE = i_pool + 1
            SCREAMING_SNAKE_CASE = np.multiply(
                lowerCamelCase_ , np.multiply(out_map[i_map] , (1 - out_map[i_map])))
            pd_all.append(lowerCamelCase_)
        return pd_all

    # Gradient-descent training loop; returns the final mean squared error.
    def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a=bool) -> str:
        print('----------------------Start Training-------------------------')
        print((' - - Shape: Train_Data ', np.shape(lowerCamelCase_)))
        print((' - - Shape: Teach_Data ', np.shape(lowerCamelCase_)))
        SCREAMING_SNAKE_CASE = 0
        SCREAMING_SNAKE_CASE = []
        SCREAMING_SNAKE_CASE = 1_0000
        while rp < n_repeat and mse >= error_accuracy:
            SCREAMING_SNAKE_CASE = 0
            print(f'''-------------Learning Time {rp}--------------''')
            for p in range(len(lowerCamelCase_)):
                # print('------------Learning Image: %d--------------'%p)
                SCREAMING_SNAKE_CASE = np.asmatrix(datas_train[p])
                SCREAMING_SNAKE_CASE = np.asarray(datas_teach[p])
                SCREAMING_SNAKE_CASE = self.convolute(
                    lowerCamelCase_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
                SCREAMING_SNAKE_CASE = self.pooling(lowerCamelCase_ , self.size_poolinga)
                SCREAMING_SNAKE_CASE = np.shape(lowerCamelCase_)
                SCREAMING_SNAKE_CASE = self._expand(lowerCamelCase_)
                SCREAMING_SNAKE_CASE = data_bp_input
                SCREAMING_SNAKE_CASE = np.dot(lowerCamelCase_ , self.vji.T) - self.thre_bpa
                SCREAMING_SNAKE_CASE = self.sig(lowerCamelCase_)
                SCREAMING_SNAKE_CASE = np.dot(lowerCamelCase_ , self.wkj.T) - self.thre_bpa
                SCREAMING_SNAKE_CASE = self.sig(lowerCamelCase_)
                # --------------Model Leaning ------------------------
                # calculate error and gradient---------------
                SCREAMING_SNAKE_CASE = np.multiply(
                    (data_teach - bp_outa) , np.multiply(lowerCamelCase_ , (1 - bp_outa)))
                SCREAMING_SNAKE_CASE = np.multiply(
                    np.dot(lowerCamelCase_ , self.wkj) , np.multiply(lowerCamelCase_ , (1 - bp_outa)))
                SCREAMING_SNAKE_CASE = np.dot(lowerCamelCase_ , self.vji)
                SCREAMING_SNAKE_CASE = pd_i_all / (self.size_poolinga * self.size_poolinga)
                SCREAMING_SNAKE_CASE = pd_conva_pooled.T.getA().tolist()
                SCREAMING_SNAKE_CASE = self._calculate_gradient_from_pool(
                    lowerCamelCase_ , lowerCamelCase_ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conva[1]):
                    SCREAMING_SNAKE_CASE = self._expand_mat(pd_conva_all[k_conv])
                    SCREAMING_SNAKE_CASE = self.rate_weight * np.dot(lowerCamelCase_ , lowerCamelCase_)
                    SCREAMING_SNAKE_CASE = self.w_conva[k_conv] + delta_w.reshape(
                        (self.conva[0], self.conva[0]))
                    SCREAMING_SNAKE_CASE = (
                        self.thre_conva[k_conv]
                        - np.sum(pd_conva_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                SCREAMING_SNAKE_CASE = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
                SCREAMING_SNAKE_CASE = self.vji + pd_j_all.T * bp_outa * self.rate_weight
                SCREAMING_SNAKE_CASE = self.thre_bpa - pd_k_all * self.rate_thre
                SCREAMING_SNAKE_CASE = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                SCREAMING_SNAKE_CASE = np.sum(abs(data_teach - bp_outa))
                error_count += errors
                # print(' ----Teach ',data_teach)
                # print(' ----BP_output ',bp_out3)
            SCREAMING_SNAKE_CASE = rp + 1
            SCREAMING_SNAKE_CASE = error_count / patterns
            all_mse.append(lowerCamelCase_)

        # Plot the per-epoch MSE against the target accuracy line.
        def draw_error():
            SCREAMING_SNAKE_CASE = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(lowerCamelCase_ , '+-')
            plt.plot(lowerCamelCase_ , 'r--')
            plt.xlabel('Learning Times')
            plt.ylabel('All_mse')
            plt.grid(lowerCamelCase_ , alpha=0.5)
            plt.show()

        print('------------------Training Complished---------------------')
        print((' - - Training epoch: ', rp, f''' - - Mse: {mse:.6f}'''))
        if draw_e:
            draw_error()
        return mse

    # Forward pass over a batch of test images; returns rounded outputs.
    def SCREAMING_SNAKE_CASE__ ( self , a) -> Tuple:
        SCREAMING_SNAKE_CASE = []
        print('-------------------Start Testing-------------------------')
        print((' - - Shape: Test_Data ', np.shape(lowerCamelCase_)))
        for p in range(len(lowerCamelCase_)):
            SCREAMING_SNAKE_CASE = np.asmatrix(datas_test[p])
            SCREAMING_SNAKE_CASE = self.convolute(
                lowerCamelCase_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
            SCREAMING_SNAKE_CASE = self.pooling(lowerCamelCase_ , self.size_poolinga)
            SCREAMING_SNAKE_CASE = self._expand(lowerCamelCase_)
            SCREAMING_SNAKE_CASE = data_bp_input
            SCREAMING_SNAKE_CASE = bp_outa * self.vji.T - self.thre_bpa
            SCREAMING_SNAKE_CASE = self.sig(lowerCamelCase_)
            SCREAMING_SNAKE_CASE = bp_outa * self.wkj.T - self.thre_bpa
            SCREAMING_SNAKE_CASE = self.sig(lowerCamelCase_)
            produce_out.extend(bp_outa.getA().tolist())
        SCREAMING_SNAKE_CASE = [list(map(self.do_round , lowerCamelCase_)) for each in produce_out]
        return np.asarray(lowerCamelCase_)

    # Expose the convolution + pooling stage for a single image (inspection).
    def SCREAMING_SNAKE_CASE__ ( self , a) -> Optional[int]:
        SCREAMING_SNAKE_CASE = np.asmatrix(lowerCamelCase_)
        SCREAMING_SNAKE_CASE = self.convolute(
            lowerCamelCase_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
        SCREAMING_SNAKE_CASE = self.pooling(lowerCamelCase_ , self.size_poolinga)
        return data_conveda, data_pooleda
if __name__ == "__main__":
    # No demo/training entry point is provided; importing this module is a no-op.
    pass
| 718
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
a_ : List[str] = R'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. 
Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'
@add_start_docstrings(a_ )
class _snake_case ( PretrainedConfig ):
    """Configuration for RAG: wraps a question-encoder config and a generator config.

    NOTE(review): the obfuscated original declared every ``__init__`` parameter
    as ``a`` (a SyntaxError), inherited from ``A__`` instead of the
    ``PretrainedConfig`` imported above, decorated with ``A__`` instead of the
    docstring constant ``a_``, and collapsed
    ``from_question_encoder_generator_configs``/``to_dict`` into one name.
    The canonical names (documented in the ``a_`` docstring) are restored here.
    """

    model_type = '''rag'''
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ) -> None:
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"

        question_encoder_config = kwargs.pop('question_encoder')
        question_encoder_model_type = question_encoder_config.pop('model_type')
        decoder_config = kwargs.pop('generator')
        decoder_model_type = decoder_config.pop('model_type')

        # Imported lazily to avoid a circular import with the auto-config registry.
        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        # Fall back to the generator's forced EOS token when none was given.
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, 'forced_eos_token_id', None)

    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config, generator_config, **kwargs) -> PretrainedConfig:
        """Build a RAG config from the two sub-model configurations."""
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize this config (including both nested configs) to a plain dict."""
        output = copy.deepcopy(self.__dict__)
        output['question_encoder'] = self.question_encoder.to_dict()
        output['generator'] = self.generator.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
| 444
| 0
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
# Directory containing the fixture files used by the tests below.
_A : str = get_tests_dir("""fixtures""")
class __snake_case ( unittest.TestCase ):
    """Offline/error behaviour of ``WavaVecaFeatureExtractor.from_pretrained``.

    NOTE(review): obfuscation renamed every assignment target to
    ``SCREAMING_SNAKE_CASE__`` and passes the undefined ``A_`` as the mock
    ``return_value``, so these tests cannot run as written.
    """

    def lowercase_ ( self ):
        """A cached model should still load when the Hub returns HTTP 500."""
        SCREAMING_SNAKE_CASE__ = mock.Mock()
        SCREAMING_SNAKE_CASE__ = 5_00
        SCREAMING_SNAKE_CASE__ = {}
        SCREAMING_SNAKE_CASE__ = HTTPError
        SCREAMING_SNAKE_CASE__ = {}
        # Download this model to make sure it's in the cache.
        SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('''requests.Session.request''' , return_value=A_ ) as mock_head:
            SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
            # This check we did call the fake head request
            mock_head.assert_called()

    def lowercase_ ( self ):
        """Loading directly from a full URL to the config file should work."""
        SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained(
            '''https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json''' )
@is_staging_test
class __snake_case ( unittest.TestCase ):
    """Round-trip push/pull of feature extractors against the staging Hub.

    NOTE(review): obfuscation renamed assignment targets to
    ``SCREAMING_SNAKE_CASE__`` and replaced several real arguments with the
    undefined ``A_``, so these tests cannot run as written.
    """

    @classmethod
    def lowercase_ ( cls ):
        """Log in with the test token before the suite runs."""
        SCREAMING_SNAKE_CASE__ = TOKEN
        HfFolder.save_token(A_ )

    @classmethod
    def lowercase_ ( cls ):
        """Best-effort cleanup of the repos created by the tests."""
        try:
            delete_repo(token=cls._token , repo_id='''test-feature-extractor''' )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id='''valid_org/test-feature-extractor-org''' )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id='''test-dynamic-feature-extractor''' )
        except HTTPError:
            pass

    def lowercase_ ( self ):
        """push_to_hub + save_pretrained(push_to_hub=True) round-trip (user namespace)."""
        SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained(A_ )
        feature_extractor.push_to_hub('''test-feature-extractor''' , use_auth_token=self._token )
        SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained(f'''{USER}/test-feature-extractor''' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(A_ , getattr(A_ , A_ ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='''test-feature-extractor''' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                A_ , repo_id='''test-feature-extractor''' , push_to_hub=A_ , use_auth_token=self._token )
        SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained(f'''{USER}/test-feature-extractor''' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(A_ , getattr(A_ , A_ ) )

    def lowercase_ ( self ):
        """Same round-trip under an organization namespace."""
        SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained(A_ )
        feature_extractor.push_to_hub('''valid_org/test-feature-extractor''' , use_auth_token=self._token )
        SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor''' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(A_ , getattr(A_ , A_ ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='''valid_org/test-feature-extractor''' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                A_ , repo_id='''valid_org/test-feature-extractor-org''' , push_to_hub=A_ , use_auth_token=self._token )
        SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor-org''' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(A_ , getattr(A_ , A_ ) )

    def lowercase_ ( self ):
        """Pushing a custom (dynamic) feature extractor records the auto_map."""
        CustomFeatureExtractor.register_for_auto_class()
        SCREAMING_SNAKE_CASE__ = CustomFeatureExtractor.from_pretrained(A_ )
        feature_extractor.push_to_hub('''test-dynamic-feature-extractor''' , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map , {'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor'''} , )
        SCREAMING_SNAKE_CASE__ = AutoFeatureExtractor.from_pretrained(
            f'''{USER}/test-dynamic-feature-extractor''' , trust_remote_code=A_ )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__ , '''CustomFeatureExtractor''' )
| 100
|
from manim import *
class _UpperCAmelCase ( A__ ):
    # Manim scene animating how model weights move between CPU, GPU and disk.
    #
    # NOTE(review): obfuscated -- every assignment target became ``A__`` (each
    # overwriting the previous) and most call arguments became the undefined
    # ``a__``, so the scene cannot render as written; the code is left
    # untouched below.

    def snake_case_ ( self):
        A__ = Rectangle(height=0.5 , width=0.5)
        A__ = Rectangle(height=0.2_5 , width=0.2_5)
        A__ = Rectangle(height=0.4_6 , width=0.4_6).set_stroke(width=0)
        # CPU block: two columns of six memory cells.
        A__ = [mem.copy() for i in range(6)]
        A__ = [mem.copy() for i in range(6)]
        A__ = VGroup(*a__).arrange(a__ , buff=0)
        A__ = VGroup(*a__).arrange(a__ , buff=0)
        A__ = VGroup(a__ , a__).arrange(a__ , buff=0)
        A__ = Text('''CPU''' , font_size=2_4)
        A__ = Group(a__ , a__).arrange(a__ , buff=0.5 , aligned_edge=a__)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(a__)
        # GPU block: four memory cells.
        A__ = [mem.copy() for i in range(4)]
        A__ = VGroup(*a__).arrange(a__ , buff=0)
        A__ = Text('''GPU''' , font_size=2_4)
        A__ = Group(a__ , a__).arrange(a__ , buff=0.5 , aligned_edge=a__)
        gpu.move_to([-1, -1, 0])
        self.add(a__)
        # Model block: six memory cells.
        A__ = [mem.copy() for i in range(6)]
        A__ = VGroup(*a__).arrange(a__ , buff=0)
        A__ = Text('''Model''' , font_size=2_4)
        A__ = Group(a__ , a__).arrange(a__ , buff=0.5 , aligned_edge=a__)
        model.move_to([3, -1.0, 0])
        self.add(a__)
        A__ = []
        A__ = []
        A__ = []
        # Place one fill target per model cell alongside the CPU cells.
        for i, rect in enumerate(a__):
            rect.set_stroke(a__)
            A__ = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3).set_stroke(width=0.0).set_fill(a__ , opacity=0.7)
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.0_2 , direction=a__)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0] , direction=a__ , buff=0.0)
            else:
                cpu_target.next_to(model_cpu_arr[i - 1] , direction=a__ , buff=0.0)
            self.add(a__)
            model_cpu_arr.append(a__)
        self.add(*a__ , *a__ , *a__)
        # Checkpoint block: six memory cells.
        A__ = [mem.copy() for i in range(6)]
        A__ = VGroup(*a__).arrange(a__ , buff=0)
        A__ = Text('''Loaded Checkpoint''' , font_size=2_4)
        A__ = Group(a__ , a__).arrange(a__ , buff=0.5 , aligned_edge=a__)
        checkpoint.move_to([3, 0.5, 0])
        self.add(a__)
        A__ = []
        A__ = []
        # Mirror each checkpoint cell onto a CPU cell (left column first).
        for i, rect in enumerate(a__):
            A__ = fill.copy().set_fill(a__ , opacity=0.7)
            target.move_to(a__)
            ckpt_arr.append(a__)
            A__ = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5])
            ckpt_cpu_arr.append(a__)
        self.add(*a__ , *a__)
        # Legend.
        A__ = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        A__ = MarkupText(
            F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=1_8 , )
        key_text.move_to([-5, 2.4, 0])
        self.add(a__ , a__)
        A__ = MarkupText(
            F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=1_8 , )
        blue_text.next_to(a__ , DOWN * 2.4 , aligned_edge=key_text.get_left())
        self.add(a__)
        A__ = MarkupText(
            F"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device." , font_size=2_4 , )
        step_a.move_to([2, 2, 0])
        # Disk block: two columns of six (meta) memory cells.
        A__ = [meta_mem.copy() for i in range(6)]
        A__ = [meta_mem.copy() for i in range(6)]
        A__ = VGroup(*a__).arrange(a__ , buff=0)
        A__ = VGroup(*a__).arrange(a__ , buff=0)
        A__ = VGroup(a__ , a__).arrange(a__ , buff=0)
        A__ = Text('''Disk''' , font_size=2_4)
        A__ = Group(a__ , a__).arrange(a__ , buff=0.5 , aligned_edge=a__)
        disk.move_to([-4.0, -1.2_5, 0])
        self.play(Write(a__ , run_time=3) , Write(a__ , run_time=1) , Create(a__ , run_time=1))
        # Animate checkpoint cells shrinking onto the disk.
        A__ = []
        for i, rect in enumerate(a__):
            A__ = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i]).scale(0.5)
            animations.append(MoveToTarget(a__ , run_time=1.5))
        self.play(*a__)
        self.play(FadeOut(a__))
        A__ = MarkupText(F"Then, the checkpoint is removed from memory\nthrough garbage collection." , font_size=2_4)
        step_a.move_to([2, 2, 0])
        self.play(Write(a__ , run_time=3))
        self.play(
            FadeOut(a__ , a__ , *a__ , *a__) , )
        self.wait()
| 632
| 0
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case_ ( a__ ,unittest.TestCase ):
    # Tokenizer test-suite for MGP-STR (character-level vocabulary).
    # NOTE(review): the mixin base is the undefined ``a__`` and many arguments
    # use the undefined ``_A`` (obfuscation artifacts), so these tests cannot
    # run as written.
    A_ = MgpstrTokenizer
    A_ = False
    A_ = {}
    A_ = False

    def UpperCAmelCase__ ( self : Optional[int] )->List[str]:
        """Write a small character vocabulary into the temp dir for the tests."""
        super().setUp()
        # fmt: off
        __lowerCAmelCase : List[Any] = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        __lowerCAmelCase : List[str] = dict(zip(_A , range(len(_A ) ) ) )
        __lowerCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(_A ) + """\n""" )

    def UpperCAmelCase__ ( self : List[Any] , **_snake_case : Union[str, Any] )->Optional[Any]:
        """Instantiate a tokenizer from the temp vocab dir."""
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_A )

    def UpperCAmelCase__ ( self : str , _snake_case : List[str] )->Tuple:
        """Return an (input_text, expected_output_text) pair."""
        __lowerCAmelCase : Optional[Any] = 'tester'
        __lowerCAmelCase : Tuple = 'tester'
        return input_text, output_text

    @unittest.skip("""MGP-STR always lower cases letters.""" )
    def UpperCAmelCase__ ( self : str )->List[str]:
        pass

    def UpperCAmelCase__ ( self : Any )->List[str]:
        """An added special token must encode to one id and never be decoded back."""
        __lowerCAmelCase : Union[str, Any] = self.get_tokenizers(do_lower_case=_A )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                __lowerCAmelCase : List[str] = '[SPECIAL_TOKEN]'
                tokenizer.add_special_tokens({"""cls_token""": special_token} )
                __lowerCAmelCase : Dict = tokenizer.encode([special_token] , add_special_tokens=_A )
                self.assertEqual(len(_A ) , 1 )
                __lowerCAmelCase : Dict = tokenizer.decode(_A , skip_special_tokens=_A )
                self.assertTrue(special_token not in decoded )

    def UpperCAmelCase__ ( self : Optional[Any] )->List[str]:
        """tokenize / convert_tokens_to_ids / encode / decode must be consistent."""
        __lowerCAmelCase : List[str] = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                __lowerCAmelCase : Tuple = self.get_input_output_texts(_A )
                __lowerCAmelCase : Dict = tokenizer.tokenize(_A )
                __lowerCAmelCase : List[Any] = tokenizer.convert_tokens_to_ids(_A )
                __lowerCAmelCase : Union[str, Any] = tokenizer.encode(_A , add_special_tokens=_A )
                self.assertListEqual(_A , _A )
                __lowerCAmelCase : int = tokenizer.convert_ids_to_tokens(_A )
                self.assertNotEqual(len(_A ) , 0 )
                __lowerCAmelCase : int = tokenizer.decode(_A )
                self.assertIsInstance(_A , _A )
                self.assertEqual(text_a.replace(""" """ , """""" ) , _A )

    @unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
    def UpperCAmelCase__ ( self : List[str] )->Union[str, Any]:
        pass

    @unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
    def UpperCAmelCase__ ( self : Dict )->Any:
        pass
| 716
|
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class snake_case_ ( pl.LightningModule ):
    """Minimal LightningModule shell used only to load a Longformer-QA checkpoint.

    NOTE(review): the obfuscated original assigned to ``__lowerCAmelCase``
    instead of the ``self.*`` attributes that the converter below reads
    (``lightning_model.model`` / ``lightning_model.qa_outputs``); restored here.
    """

    def __init__( self : Union[str, Any] , model )->None:
        super().__init__()
        # Base LongformerModel whose weights the Lightning checkpoint targets.
        self.model = model
        # Two labels: start and end logits for extractive QA.
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size , self.num_labels )

    def forward( self )->None:
        # Never called during conversion; present only to satisfy LightningModule.
        pass
def _SCREAMING_SNAKE_CASE ( longformer_model :str , longformer_question_answering_ckpt_path :str , pytorch_dump_folder_path :str ) -> str:
    """Convert a PyTorch-Lightning Longformer-QA checkpoint into a plain
    ``LongformerForQuestionAnswering`` ``save_pretrained`` folder.

    Args:
        longformer_model: model identifier of the Longformer backbone.
        longformer_question_answering_ckpt_path: path to the Lightning ckpt.
        pytorch_dump_folder_path: output directory for the converted model.

    NOTE(review): the original signature repeated one parameter name three
    times (a SyntaxError); names restored from the usages below.
    """
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model )
    # `snake_case_` is the LightningModule wrapper class defined above.
    lightning_model = snake_case_(longformer )

    ckpt = torch.load(longformer_question_answering_ckpt_path , map_location=torch.device("""cpu""" ) )
    lightning_model.load_state_dict(ckpt["""state_dict"""] )

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model )

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path )

    print(F'''Conversion successful. Model saved under {pytorch_dump_folder_path}''' )


# Name used by the CLI entry point at the bottom of this script.
convert_longformer_qa_checkpoint_to_pytorch = _SCREAMING_SNAKE_CASE
if __name__ == "__main__":
    # CLI wrapper around the conversion function defined above.
    # NOTE(review): the original bound the parser and parsed args to a dead
    # name (`_UpperCAmelCase`) while reading `parser`/`args`; restored.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--longformer_model',
        default=None,
        type=str,
        required=True,
        help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.',
    )
    parser.add_argument(
        '--longformer_question_answering_ckpt_path',
        default=None,
        type=str,
        required=True,
        help='Path the official PyTorch Lightning Checkpoint.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    # Call the (obfuscated-named) conversion function directly so this block
    # does not depend on any alias being defined.
    _SCREAMING_SNAKE_CASE(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
| 240
| 0
|
from __future__ import annotations
from typing import Any
def _SCREAMING_SNAKE_CASE ( sequence ) -> None:
    """Print every subsequence (the power set) of ``sequence``."""
    create_state_space_tree(sequence , [] , 0 )


def create_state_space_tree( sequence , current_subsequence , index ) -> None:
    """Recursive backtracking helper.

    At each ``index`` it branches on excluding vs. including the element and
    prints ``current_subsequence`` once the walk passes the end of the input.
    (The obfuscated original gave both functions one name, so this helper was
    unreachable and its three parameters collided.)
    """
    if index == len(sequence ):
        print(current_subsequence )
        return
    # Branch 1: skip sequence[index].
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    # Branch 2: take sequence[index], recurse, then backtrack.
    current_subsequence.append(sequence[index] )
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.pop()


# Public name used by the demo at the bottom of this script.
generate_all_subsequences = _SCREAMING_SNAKE_CASE
if __name__ == "__main__":
    # Demo: print every subsequence of an int list, then of a string list.
    # NOTE(review): the original assigned the list to a dead name while the
    # calls read `seq`; restored.
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(['''A''', '''B''', '''C'''])
    generate_all_subsequences(seq)
| 239
|
def _SCREAMING_SNAKE_CASE ( lst ) -> list:
    """Sort ``lst`` in place with gnome sort and return it.

    Walk forward while adjacent items are ordered; otherwise swap the pair
    and step back to re-check. O(n^2) worst case.

    NOTE(review): the original's swap line assigned both values to a single
    temporary (`__A , __A : str = ...`), so no element swap ever happened;
    the in-place element swap is restored.
    """
    if len(lst ) <= 1:
        return lst
    i = 1
    while i < len(lst ):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            # Out of order: swap the pair and step back to re-check.
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


# Public name used by the CLI demo below.
gnome_sort = _SCREAMING_SNAKE_CASE
if __name__ == "__main__":
    # Read a comma-separated list of integers from stdin and print it sorted.
    # NOTE(review): the original bound both values to a dead name while
    # reading `user_input`/`unsorted`; restored.
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(gnome_sort(unsorted))
| 239
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    # sentencepiece is an optional dependency; without it the slow tokenizer
    # class is unavailable and `slow_tokenizer_class` resolves to None.
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

# NOTE(review): the obfuscated original bound every one of the constants
# below to the single name `a__`, so the class further down could not see
# them; the canonical HF names are restored.
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
        '''google/bigbird-roberta-large''': (
            '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
        ),
        '''google/bigbird-base-trivia-itc''': (
            '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
        ),
    },
    '''tokenizer_file''': {
        '''google/bigbird-roberta-base''': (
            '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'''
        ),
        '''google/bigbird-roberta-large''': (
            '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'''
        ),
        '''google/bigbird-base-trivia-itc''': (
            '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'''
        ),
    },
}

# Maximum sequence lengths of the pretrained checkpoints.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''google/bigbird-roberta-base''': 4_0_9_6,
    '''google/bigbird-roberta-large''': 4_0_9_6,
    '''google/bigbird-base-trivia-itc''': 4_0_9_6,
}

# SentencePiece word-boundary marker.
SPIECE_UNDERLINE = '''▁'''

# Preserve the obfuscated module's last binding of this name.
a__ : Any = SPIECE_UNDERLINE
class UpperCamelCase_ ( PreTrainedTokenizerFast ):
    """Fast, `tokenizers`-backed BigBird tokenizer (SentencePiece/Unigram).

    NOTE(review): the obfuscated original reused one name for every class
    attribute and one name for every method, so none of the base-class hooks
    were actually overridden; the canonical names are restored here. The base
    class is the `PreTrainedTokenizerFast` imported at the top of this file.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self ,
        vocab_file=None ,
        tokenizer_file=None ,
        unk_token="<unk>" ,
        bos_token="<s>" ,
        eos_token="</s>" ,
        pad_token="<pad>" ,
        sep_token="[SEP]" ,
        mask_token="[MASK]" ,
        cls_token="[CLS]" ,
        **kwargs ,
    ):
        """Build the fast tokenizer; defaults match the slow BigBird tokenizer."""
        # Wrap plain strings in AddedToken (no stripping) so the special
        # tokens round-trip exactly.
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token

        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )

        self.vocab_file = vocab_file
        # Saving the slow vocab needs the original spiece.model file.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        """``[CLS] X [SEP]`` for a single sequence, ``[CLS] A [SEP] B [SEP]`` for a pair."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]

    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        """Segment ids: all 0 for the first sequence (+specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """Copy the SentencePiece model file into ``save_directory``."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )

        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )

        # Only copy when the source differs from the destination path.
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )

        return (out_vocab_file,)
| 553
|
"""simple docstring"""
import functools
def UpperCAmelCase__ (days , costs ):
    """Minimum cost to travel on every day in ``days`` (LeetCode 983).

    ``costs`` is ``[1-day, 7-day, 30-day]`` pass prices; valid days are 1-365.

    Raises:
        ValueError: if ``days`` is not a list of ints in (0, 366) or ``costs``
            is not a list of exactly three ints.

    NOTE(review): the original signature repeated one parameter name twice
    (a SyntaxError) while the body read ``days``/``costs``; restored.
    """
    # Validate inputs up front so the cached recursion only sees clean data.
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError("The parameter days should be a list of integers" )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError("The parameter costs should be a list of three integers" )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError("All days elements should be greater than 0" )
    if max(days ) >= 366:
        raise ValueError("All days elements should be less than 366" )
    days_set = set(days )

    @functools.cache
    def dynamic_programming(index ) -> int:
        # Cheapest way to cover all travel days at or after `index`.
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1 )

        return min(
            costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )

    return dynamic_programming(1 )
if __name__ == "__main__":
    # Run the doctests embedded in this module.
    import doctest
    doctest.testmod()
| 553
| 1
|
# Doomsday "anchor dates" of each month, reduced mod 7 (4/4, 6/6, 8/8,
# 10/10, 12/12, 5/9, 9/5, 7/11, 11/7, 3/14; Jan/Feb shift in leap years).
# NOTE(review): the obfuscated original bound all three tables to one dead
# name, leaving the function below with unresolved references; restored.
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: """Sunday""",
    1: """Monday""",
    2: """Tuesday""",
    3: """Wednesday""",
    4: """Thursday""",
    5: """Friday""",
    6: """Saturday""",
}

# Preserve the obfuscated module's last binding of this name.
_UpperCAmelCase = WEEK_DAY_NAMES
def __lowerCamelCase ( year , month , day ):
    """Return the weekday name of a Gregorian date via the Doomsday rule.

    NOTE(review): the original signature repeated one parameter name three
    times (a SyntaxError); names restored from the usages below. The lookup
    tables are kept local because the module-level copies were obfuscated
    out of reach.
    """
    assert len(str(year ) ) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"

    doomsday_not_leap = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
    doomsday_leap = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
    week_day_names = {
        0: "Sunday",
        1: "Monday",
        2: "Tuesday",
        3: "Wednesday",
        4: "Thursday",
        5: "Friday",
        6: "Saturday",
    }

    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    # Weekday on which all of this year's doomsday dates fall.
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # Pick the month's doomsday date. Century years are leap only when
    # divisible by 400 — the original compared `(year % 400) == 0` here,
    # misclassifying e.g. the year 2000 as non-leap.
    day_anchor = (
        doomsday_not_leap[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else doomsday_leap[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return week_day_names[week_day]


# Public alias for the obfuscated name.
get_week_day = __lowerCamelCase
if __name__ == "__main__":
    # Run the doctests embedded in this module.
    import doctest
    doctest.testmod()
| 362
|
"""simple docstring"""
from __future__ import annotations
def lowercase__(nums , target ) ->list[int]:
    """Two-pointer search in a **sorted** list for two values summing to
    ``target``; returns their indices, or ``[]`` when no pair exists.

    NOTE(review): the original signature repeated one parameter name twice
    (a SyntaxError) while the body read ``nums``/``target``; restored.
    """
    i = 0
    j = len(nums ) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            # Sum too small: advance the left pointer.
            i = i + 1
        else:
            # Sum too large: retreat the right pointer.
            j = j - 1
    return []


# Public name used by the demo below.
two_pointer = lowercase__
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): `two_pointer` is not bound under that name in this file
    # as written (the def above is obfuscated to `lowercase__`) — confirm.
    print(F"""{two_pointer([2, 7, 11, 15], 9) = }""")
| 218
| 0
|
"""simple docstring"""
def __lowercase ( a : int ) -> int:
    """Sum of the decimal digits of ``a`` (sign ignored), iteratively.

    NOTE(review): the original bound ``abs(a)`` and the accumulator to dead
    names while the loop read ``n``/``res``; restored.
    """
    n = abs(a )
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


# Stable canonical name; the benchmark below refers to the helpers this way.
sum_of_digits = __lowercase
def __lowercase ( a : int ) -> int:
    """Digit sum of ``a`` (sign ignored), computed recursively."""
    n = abs(a )
    # Recurse through the stable alias below: `__lowercase` itself is
    # re-bound by later definitions in this module.
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10 )


# Stable canonical name (also the recursion target).
sum_of_digits_recursion = __lowercase
def __lowercase ( a : int ) -> int:
    """Digit sum of ``a`` via string conversion (compact form).

    NOTE(review): the original summed ``int(a)`` once per digit instead of
    ``int(c)``, returning ``a * len(str(a))`` for positive inputs; fixed.
    """
    return sum(int(c ) for c in str(abs(a ) ) )


# Stable canonical name used by the benchmark below.
sum_of_digits_compact = __lowercase
def __lowercase ( ) -> None:
    """Benchmark the three digit-sum implementations with ``timeit``.

    NOTE(review): the inner helper's original signature repeated one
    parameter name (a SyntaxError) while its body read ``func``/``value``/
    ``call``/``timing``; restored. The three function names referenced in
    the loop must be bound at module level — confirm the aliases exist.
    """
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func : Callable , value : int ) -> None:
        # Time one call expression, e.g. "sum_of_digits(262144)".
        call = f'''{func.__name__}({value})'''
        timing = timeit(f'''__main__.{call}''' , setup='''import __main__''' )
        print(f'''{call:56} = {func(value )} -- {timing:.4f} seconds''' )

    for value in (262_144, 1_125_899_906_842_624, 1_267_650_600_228_229_401_496_703_205_376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func , value )
        print()


# Name used by the __main__ guard below.
benchmark = __lowercase
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): `benchmark` is not bound under that name in this file as
    # written (the def above is obfuscated to `__lowercase`) — confirm.
    benchmark()
| 497
|
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowercase ( OnnxPipelineTesterMixin , unittest.TestCase ):
    """Fast CPU checks of `OnnxStableDiffusionPipeline` against a tiny test
    checkpoint, once per supported scheduler.

    NOTE(review): the obfuscated original stored the checkpoint id under a
    dead attribute, gave every method the same name (so only the last one
    survived), and left every local unbound; canonical names are restored.
    """

    # Read by every method below via `self.hub_checkpoint`.
    hub_checkpoint = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''

    def get_dummy_inputs( self , seed=0 ):
        """Deterministic call kwargs for a 2-step tiny pipeline run."""
        generator = np.random.RandomState(seed )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 7.5,
            '''output_type''': '''numpy''',
        }
        return inputs

    def test_pipeline_default_ddim( self ):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        pipe.set_progress_bar_config(disable=None )

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def test_pipeline_pndm( self ):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        # skip_prk_steps keeps the PNDM warm-up out of the 2-step run.
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=True )
        pipe.set_progress_bar_config(disable=None )

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def test_pipeline_lms( self ):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def test_pipeline_euler( self ):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def test_pipeline_euler_ancestral( self ):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def test_pipeline_dpm_multistep( self ):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def test_stable_diffusion_prompt_embeds( self ):
        """Passing precomputed `prompt_embeds` must match passing the prompt."""
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        pipe.set_progress_bar_config(disable=None )

        inputs = self.get_dummy_inputs()
        inputs['''prompt'''] = 3 * [inputs['''prompt''']]

        # forward
        output = pipe(**inputs )
        image_slice_a = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop('''prompt''' )]

        text_inputs = pipe.tokenizer(
            prompt , padding='''max_length''' , max_length=pipe.tokenizer.model_max_length , truncation=True , return_tensors='''np''' , )
        text_inputs = text_inputs['''input_ids''']

        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0]

        inputs['''prompt_embeds'''] = prompt_embeds

        # forward
        output = pipe(**inputs )
        image_slice_a_a = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_a.flatten() - image_slice_a_a.flatten() ).max() < 1e-4

    def test_stable_diffusion_negative_prompt_embeds( self ):
        """Same check as above, with a negative prompt included."""
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        pipe.set_progress_bar_config(disable=None )

        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ['''this is a negative prompt''']
        inputs['''negative_prompt'''] = negative_prompt
        inputs['''prompt'''] = 3 * [inputs['''prompt''']]

        # forward
        output = pipe(**inputs )
        image_slice_a = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop('''prompt''' )]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p , padding='''max_length''' , max_length=pipe.tokenizer.model_max_length , truncation=True , return_tensors='''np''' , )
            text_inputs = text_inputs['''input_ids''']
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0] )

        inputs['''prompt_embeds'''], inputs['''negative_prompt_embeds'''] = embeds

        # forward
        output = pipe(**inputs )
        image_slice_a_a = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_a.flatten() - image_slice_a_a.flatten() ).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
    """Slow GPU integration tests for `OnnxStableDiffusionPipeline`.

    NOTE(review): this class reuses the name `_lowercase` from the fast test
    class above and therefore shadows it at module scope — confirm the
    intended distinct class names. Locals and method names, destroyed by the
    obfuscation, are restored below.
    """

    @property
    def gpu_provider( self ):
        """ONNX Runtime CUDA provider tuple with a bounded memory arena."""
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options( self ):
        """Session options with memory-pattern optimization disabled."""
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm( self ):
        # using the PNDM scheduler by default
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        sd_pipe.set_progress_bar_config(disable=None )

        prompt = '''A painting of a squirrel eating a burger'''
        np.random.seed(0 )
        output = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type='''np''' )
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    def test_inference_ddim( self ):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=ddim_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        sd_pipe.set_progress_bar_config(disable=None )

        prompt = '''open neural network exchange'''
        generator = np.random.RandomState(0 )
        output = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type='''np''' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    def test_inference_k_lms( self ):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=lms_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        sd_pipe.set_progress_bar_config(disable=None )

        prompt = '''open neural network exchange'''
        generator = np.random.RandomState(0 )
        output = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type='''np''' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    def test_intermediate_state( self ):
        """The per-step callback must receive the expected latents."""
        number_of_steps = 0

        def test_callback_fn(step : int , timestep : int , latents : np.ndarray ) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3

        test_callback_fn.has_been_called = False

        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )

        prompt = '''Andromeda galaxy in a bottle'''

        generator = np.random.RandomState(0 )
        pipe(
            prompt=prompt , num_inference_steps=5 , guidance_scale=7.5 , generator=generator , callback=test_callback_fn , callback_steps=1 , )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6

    def test_stable_diffusion_no_safety_checker( self ):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        assert isinstance(pipe , OnnxStableDiffusionPipeline )
        assert pipe.safety_checker is None

        image = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname )
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname )

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
        assert image is not None
| 497
| 1
|
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase__ ( A_ ):
__UpperCAmelCase = (DDPMScheduler,)
def UpperCamelCase_ ( self , **SCREAMING_SNAKE_CASE) -> Optional[int]:
_lowerCamelCase : Optional[int] = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.00_01,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**SCREAMING_SNAKE_CASE)
return config
def UpperCamelCase_ ( self) -> Optional[int]:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> Optional[int]:
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE , beta_end=SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> List[Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> str:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> Union[str, Any]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> Optional[Any]:
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , sample_max_value=SCREAMING_SNAKE_CASE , )
def UpperCamelCase_ ( self) -> Tuple:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> List[str]:
for t in [0, 500, 999]:
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> List[str]:
_lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCamelCase : Dict = self.get_scheduler_config()
_lowerCamelCase : Union[str, Any] = scheduler_class(**SCREAMING_SNAKE_CASE)
assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0_09_79)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
def UpperCamelCase_ ( self) -> str:
_lowerCamelCase : List[Any] = self.scheduler_classes[0]
_lowerCamelCase : List[Any] = self.get_scheduler_config()
_lowerCamelCase : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE)
_lowerCamelCase : Optional[int] = len(SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[Any] = self.dummy_model()
_lowerCamelCase : List[Any] = self.dummy_sample_deter
_lowerCamelCase : int = torch.manual_seed(0)
for t in reversed(range(SCREAMING_SNAKE_CASE)):
# 1. predict noise residual
_lowerCamelCase : Tuple = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
# 2. predict previous mean of sample x_t-1
_lowerCamelCase : Tuple = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_lowerCamelCase : Any = pred_prev_sample
_lowerCamelCase : Dict = torch.sum(torch.abs(SCREAMING_SNAKE_CASE))
_lowerCamelCase : Dict = torch.mean(torch.abs(SCREAMING_SNAKE_CASE))
assert abs(result_sum.item() - 2_58.96_06) < 1e-2
assert abs(result_mean.item() - 0.33_72) < 1e-3
def UpperCamelCase_ ( self) -> Optional[int]:
_lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCamelCase : Dict = self.get_scheduler_config(prediction_type="""v_prediction""")
_lowerCamelCase : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[Any] = len(SCREAMING_SNAKE_CASE)
_lowerCamelCase : Optional[Any] = self.dummy_model()
_lowerCamelCase : Tuple = self.dummy_sample_deter
_lowerCamelCase : List[Any] = torch.manual_seed(0)
for t in reversed(range(SCREAMING_SNAKE_CASE)):
# 1. predict noise residual
_lowerCamelCase : Tuple = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
# 2. predict previous mean of sample x_t-1
_lowerCamelCase : Union[str, Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_lowerCamelCase : Optional[Any] = pred_prev_sample
_lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE))
_lowerCamelCase : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE))
assert abs(result_sum.item() - 2_02.02_96) < 1e-2
assert abs(result_mean.item() - 0.26_31) < 1e-3
def UpperCamelCase_ ( self) -> Dict:
_lowerCamelCase : Any = self.scheduler_classes[0]
_lowerCamelCase : Union[str, Any] = self.get_scheduler_config()
_lowerCamelCase : Union[str, Any] = scheduler_class(**SCREAMING_SNAKE_CASE)
_lowerCamelCase : int = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE)
_lowerCamelCase : Optional[Any] = scheduler.timesteps
for i, timestep in enumerate(SCREAMING_SNAKE_CASE):
if i == len(SCREAMING_SNAKE_CASE) - 1:
_lowerCamelCase : Dict = -1
else:
_lowerCamelCase : int = timesteps[i + 1]
_lowerCamelCase : Union[str, Any] = scheduler.previous_timestep(SCREAMING_SNAKE_CASE)
_lowerCamelCase : Optional[Any] = prev_t.item()
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> Tuple:
_lowerCamelCase : Dict = self.scheduler_classes[0]
_lowerCamelCase : List[str] = self.get_scheduler_config()
_lowerCamelCase : Any = scheduler_class(**SCREAMING_SNAKE_CASE)
_lowerCamelCase : int = [100, 87, 50, 51, 0]
with self.assertRaises(SCREAMING_SNAKE_CASE , msg="""`custom_timesteps` must be in descending order."""):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE)
def UpperCamelCase_(self) -> str:
    """Passing both num_inference_steps and custom timesteps must raise.

    Bug fixed: mangled locals caused NameError; expected exception restored
    to ValueError per the scheduler's documented contract.
    """
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config()
    scheduler = scheduler_class(**scheduler_config)

    timesteps = [100, 87, 50, 1, 0]
    num_inference_steps = len(timesteps)

    with self.assertRaises(
        ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."
    ):
        scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
def UpperCamelCase_(self) -> List[str]:
    """Custom timesteps must start below config.num_train_timesteps.

    Bug fixed: mangled locals caused NameError; expected exception restored
    to ValueError. The quirky msg string (missing f-prefix, doubled brace)
    is kept byte-identical — it is only assertRaises' failure message.
    """
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config()
    scheduler = scheduler_class(**scheduler_config)

    timesteps = [scheduler.config.num_train_timesteps]  # out of range by one

    with self.assertRaises(
        ValueError,
        msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""",
    ):
        scheduler.set_timesteps(timesteps=timesteps)
| 88
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowercase : Optional[Any] = {
"""configuration_funnel""": ["""FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FunnelConfig"""],
"""convert_funnel_original_tf_checkpoint_to_pytorch""": [],
"""tokenization_funnel""": ["""FunnelTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : int = ["""FunnelTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Optional[Any] = [
"""FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FunnelBaseModel""",
"""FunnelForMaskedLM""",
"""FunnelForMultipleChoice""",
"""FunnelForPreTraining""",
"""FunnelForQuestionAnswering""",
"""FunnelForSequenceClassification""",
"""FunnelForTokenClassification""",
"""FunnelModel""",
"""FunnelPreTrainedModel""",
"""load_tf_weights_in_funnel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Union[str, Any] = [
"""TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFFunnelBaseModel""",
"""TFFunnelForMaskedLM""",
"""TFFunnelForMultipleChoice""",
"""TFFunnelForPreTraining""",
"""TFFunnelForQuestionAnswering""",
"""TFFunnelForSequenceClassification""",
"""TFFunnelForTokenClassification""",
"""TFFunnelModel""",
"""TFFunnelPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
__lowercase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 142
| 0
|
"""Naive (brute-force) substring pattern search."""


def a__(lowerCAmelCase__: str, pattern: str) -> list:
    """Return every index in the text *lowerCAmelCase__* where *pattern* starts.

    Runs in O(len(text) * len(pattern)).

    Bugs fixed: both parameters shared one mangled name (a SyntaxError) and
    every local was assigned to `UpperCAmelCase__` while the loop read the
    real names (`pat_len`, `match_found`, `position`).
    """
    pat_len = len(pattern)
    position = []
    for i in range(len(lowerCAmelCase__) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if lowerCAmelCase__[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position


naive_pattern_search = a__  # readable alias used by the demo below (was undefined)

if __name__ == "__main__":
    assert naive_pattern_search("ABCDEFG", "DE") == [3]
    print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
| 312
|
"""Perfect-cube check for integers."""


def a__(lowerCAmelCase__: int) -> bool:
    """Return True iff *lowerCAmelCase__* is a perfect cube (negatives included).

    Bug fixed: the original compared a floating-point cube root's cube
    directly to n (``(n ** (1/3)) ** 3 == n``), which fails even for 27
    because ``27 ** (1/3)`` is 3.0000000000000004. We round the float root
    and verify with exact integer arithmetic, probing the neighbours to
    guard against float error on large inputs.
    """
    n = abs(lowerCAmelCase__)
    root = round(n ** (1 / 3))
    return any(cand >= 0 and cand**3 == n for cand in (root - 1, root, root + 1))


perfect_cube = a__  # name used by the demo below (was undefined as written)

if __name__ == "__main__":
    print(perfect_cube(27))  # True
    print(perfect_cube(4))  # False
| 312
| 1
|
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


# NOTE(review): both module-level names below were mangled to `__lowercase`,
# so the archive map overwrites the logger binding. Originally these were
# `logger` and `MVP_PRETRAINED_CONFIG_ARCHIVE_MAP` — TODO confirm.
__lowercase = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL.
__lowercase = {
    '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json''',
}
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """Configuration class for the MVP seq2seq model (mangled copy of `MvpConfig`).

    NOTE(review): the base class name `UpperCAmelCase_` is undefined in this
    module (originally `PretrainedConfig`, imported above), the three class
    attributes all share the mangled name `a__` (each overwrites the last;
    originally `model_type`, `keys_to_ignore_at_inference`, `attribute_map`),
    and every `__init__` parameter was mangled to the same name `__lowercase`
    — a SyntaxError as written. The body reads the intended parameter names
    (`vocab_size`, `d_model`, ...). Left byte-identical; flagged for repair.
    """

    a__ : Optional[Any] = """mvp"""
    a__ : Any = ["""past_key_values"""]
    a__ : Optional[Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}

    def __init__( self , __lowercase=50_267 , __lowercase=1_024 , __lowercase=12 , __lowercase=4_096 , __lowercase=16 , __lowercase=12 , __lowercase=4_096 , __lowercase=16 , __lowercase=0.0 , __lowercase=0.0 , __lowercase="gelu" , __lowercase=1_024 , __lowercase=0.1 , __lowercase=0.0 , __lowercase=0.0 , __lowercase=0.02 , __lowercase=0.0 , __lowercase=False , __lowercase=True , __lowercase=1 , __lowercase=0 , __lowercase=2 , __lowercase=True , __lowercase=2 , __lowercase=2 , __lowercase=False , __lowercase=100 , __lowercase=800 , **__lowercase , ) -> List[Any]:
        # Core transformer dimensions.
        __UpperCamelCase :Dict = vocab_size
        __UpperCamelCase :Union[str, Any] = max_position_embeddings
        __UpperCamelCase :Union[str, Any] = d_model
        __UpperCamelCase :Optional[int] = encoder_ffn_dim
        __UpperCamelCase :Any = encoder_layers
        __UpperCamelCase :List[str] = encoder_attention_heads
        __UpperCamelCase :Optional[Any] = decoder_ffn_dim
        __UpperCamelCase :Optional[Any] = decoder_layers
        __UpperCamelCase :Dict = decoder_attention_heads
        # Regularization.
        __UpperCamelCase :Dict = dropout
        __UpperCamelCase :Optional[Any] = attention_dropout
        __UpperCamelCase :Any = activation_dropout
        __UpperCamelCase :str = activation_function
        __UpperCamelCase :Optional[Any] = init_std
        __UpperCamelCase :Tuple = encoder_layerdrop
        __UpperCamelCase :List[Any] = decoder_layerdrop
        __UpperCamelCase :int = classifier_dropout
        __UpperCamelCase :Dict = use_cache
        __UpperCamelCase :Any = encoder_layers
        __UpperCamelCase :Optional[Any] = scale_embedding  # scale factor will be sqrt(d_model) if True
        # Prompt-tuning options specific to MVP.
        __UpperCamelCase :Dict = use_prompt
        __UpperCamelCase :List[str] = prompt_length
        __UpperCamelCase :Union[str, Any] = prompt_mid_dim
        super().__init__(
            pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , is_encoder_decoder=__lowercase , decoder_start_token_id=__lowercase , forced_eos_token_id=__lowercase , **__lowercase , )
        # Back-compat shim: derive forced_bos_token_id from the legacy flag.
        if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , __lowercase):
            __UpperCamelCase :List[str] = self.bos_token_id
            warnings.warn(
                f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
                '''The config can simply be saved and uploaded again to be fixed.''')
| 167
|
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase_ ( UpperCAmelCase_ , unittest.TestCase ):
    """Fast CPU tests for DDIMPipeline.

    NOTE(review): the mixin base name `UpperCAmelCase_` is undefined here
    (originally `PipelineTesterMixin`), the class attributes all share the
    mangled name `a__`, and several call sites pass the undefined
    module-level name `__lowercase` where concrete values are expected
    (e.g. `disable=__lowercase`). Left byte-identical; flagged for repair.
    """

    a__ : Any = DDIMPipeline
    a__ : int = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    a__ : Optional[int] = PipelineTesterMixin.required_optional_params - {
        """num_images_per_prompt""",
        """latents""",
        """callback""",
        """callback_steps""",
    }
    a__ : Any = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    a__ : Optional[int] = False  # presumably a test-capability flag — TODO confirm

    def UpperCamelCase__ ( self) -> Optional[Any]:
        """Build tiny, seeded UNet + DDIM scheduler components for fast tests."""
        torch.manual_seed(0)
        __UpperCamelCase :Tuple = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
        __UpperCamelCase :Dict = DDIMScheduler()
        __UpperCamelCase :int = {'''unet''': unet, '''scheduler''': scheduler}
        return components

    def UpperCamelCase__ ( self , __lowercase , __lowercase=0) -> Tuple:
        """Deterministic generator + minimal pipeline kwargs for one run."""
        if str(__lowercase).startswith('''mps'''):
            # MPS backend does not support device-local generators.
            __UpperCamelCase :Optional[int] = torch.manual_seed(__lowercase)
        else:
            __UpperCamelCase :Tuple = torch.Generator(device=__lowercase).manual_seed(__lowercase)
        __UpperCamelCase :str = {
            '''batch_size''': 1,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs

    def UpperCamelCase__ ( self) -> Optional[int]:
        """End-to-end CPU inference compared against a pinned 3x3 slice."""
        __UpperCamelCase :int = '''cpu'''
        __UpperCamelCase :Any = self.get_dummy_components()
        __UpperCamelCase :Any = self.pipeline_class(**__lowercase)
        pipe.to(__lowercase)
        pipe.set_progress_bar_config(disable=__lowercase)
        __UpperCamelCase :Dict = self.get_dummy_inputs(__lowercase)
        __UpperCamelCase :Union[str, Any] = pipe(**__lowercase).images
        __UpperCamelCase :Dict = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 32, 32, 3))
        __UpperCamelCase :List[str] = np.array(
            [1.0_0_0E0_0, 5.7_1_7E-0_1, 4.7_1_7E-0_1, 1.0_0_0E0_0, 0.0_0_0E0_0, 1.0_0_0E0_0, 3.0_0_0E-0_4, 0.0_0_0E0_0, 9.0_0_0E-0_4])
        __UpperCamelCase :int = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(__lowercase , 1E-3)

    # Delegations to the mixin with per-test tolerance overrides.
    def UpperCamelCase__ ( self) -> str:
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3)

    def UpperCamelCase__ ( self) -> Any:
        super().test_save_load_local(expected_max_difference=3E-3)

    def UpperCamelCase__ ( self) -> Optional[Any]:
        super().test_save_load_optional_components(expected_max_difference=3E-3)

    def UpperCamelCase__ ( self) -> Union[str, Any]:
        super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
    """Slow GPU integration tests for DDIMPipeline against pinned output slices.

    NOTE(review): the undefined module-level name `__lowercase` is used at
    many call sites below (model ids, `disable=`, `generator=`), so these
    tests raise NameError as written. Left byte-identical; flagged.
    """

    def UpperCamelCase__ ( self) -> Dict:
        """CIFAR-10 checkpoint, eta=0.0, compare a pinned corner slice."""
        __UpperCamelCase :int = '''google/ddpm-cifar10-32'''
        __UpperCamelCase :str = UNetaDModel.from_pretrained(__lowercase)
        __UpperCamelCase :int = DDIMScheduler()
        __UpperCamelCase :Optional[Any] = DDIMPipeline(unet=__lowercase , scheduler=__lowercase)
        ddim.to(__lowercase)
        ddim.set_progress_bar_config(disable=__lowercase)
        __UpperCamelCase :Dict = torch.manual_seed(0)
        __UpperCamelCase :Tuple = ddim(generator=__lowercase , eta=0.0 , output_type='''numpy''').images
        __UpperCamelCase :Union[str, Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        __UpperCamelCase :List[str] = np.array([0.17_23, 0.16_17, 0.16_00, 0.16_26, 0.14_97, 0.15_13, 0.15_05, 0.14_42, 0.14_53])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def UpperCamelCase__ ( self) -> str:
        """EMA bedroom checkpoint at 256x256, same slice comparison."""
        __UpperCamelCase :int = '''google/ddpm-ema-bedroom-256'''
        __UpperCamelCase :Optional[Any] = UNetaDModel.from_pretrained(__lowercase)
        __UpperCamelCase :Union[str, Any] = DDIMScheduler.from_pretrained(__lowercase)
        __UpperCamelCase :Optional[int] = DDIMPipeline(unet=__lowercase , scheduler=__lowercase)
        ddpm.to(__lowercase)
        ddpm.set_progress_bar_config(disable=__lowercase)
        __UpperCamelCase :Dict = torch.manual_seed(0)
        __UpperCamelCase :Optional[int] = ddpm(generator=__lowercase , output_type='''numpy''').images
        __UpperCamelCase :Any = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        __UpperCamelCase :Any = np.array([0.00_60, 0.02_01, 0.03_44, 0.00_24, 0.00_18, 0.00_02, 0.00_22, 0.00_00, 0.00_69])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 167
| 1
|
"""Lorentz boost of a four-vector along the x axis.

Bugs fixed: all four functions shared the mangled name `snake_case_`
(each definition overwrote the previous one) while the bodies called
`beta`/`gamma`/`transformation_matrix`, and every module-level constant
was assigned to `__lowerCAmelCase` while being read as `c`/`ct`/`x`/...
— every call path raised NameError as written.
"""
from math import sqrt

import numpy as np
from sympy import symbols

# Speed of light (m/s)
c = 299792458

# Symbolic coordinates of a four-vector; `ct` is time pre-multiplied by c.
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    """Return v/c after validating 1 <= velocity <= c."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    """Lorentz factor 1 / sqrt(1 - (v/c)^2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """4x4 Lorentz boost matrix along the x axis for the given velocity."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    """Apply the boost to *event*; with no event, boost the symbolic vector.

    NOTE(review): when an event is supplied its time component is scaled in
    place (`event[0] *= c`), mutating the caller's array — behavior kept
    from the original, flagged for a follow-up.
    """
    if event is None:
        event = np.array([ct, x, y, z])  # symbolic four-vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example with symbols left in place:
    four_vector = transform(29979245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values:
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
| 335
|
def snake_case_(snake_case: int) -> int:
    """Return the largest number obtainable by deleting exactly one digit.

    Works on the absolute value of the input; a single-digit input yields 0
    (removing its only digit leaves the empty number).

    Raises:
        TypeError: if the input is not an ``int``.

    Bugs fixed: the original tested ``isinstance(x, x)`` (always a TypeError
    for non-type inputs), built the digit-list copies from a mangled name,
    and crashed on ``int("")`` for single-digit inputs.
    """
    if not isinstance(snake_case, int):
        raise TypeError('only integers accepted as input')
    num_str = str(abs(snake_case))
    if len(num_str) == 1:
        return 0  # nothing remains after deleting the only digit
    # One copy of the digit list per deletable position...
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    # ...then drop a different digit from each copy.
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(int("".join(digits)) for digits in num_transpositions)


if __name__ == "__main__":
    __import__('doctest').testmod()
| 335
| 1
|
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
UpperCAmelCase_ = logging.getLogger(__name__)
@dataclass
class __UpperCamelCase :
    """CLI arguments selecting the model/config/tokenizer to fine-tune.

    NOTE(review): every field name was mangled to `__A`, so later class-level
    assignments overwrite earlier ones, and `default=A__` refers to the
    module's mangled function (originally `default=None`). The original
    dataclass was `ModelArguments` with fields model_name_or_path,
    config_name, task_type, tokenizer_name, use_fast, cache_dir — TODO
    confirm before repair. Left byte-identical.
    """

    __A : str = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    __A : Optional[str] = field(
        default=A__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    __A : Optional[str] = field(
        default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} )
    __A : Optional[str] = field(
        default=A__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    __A : bool = field(default=A__ , metadata={"""help""": """Set this flag to use fast tokenization."""} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    __A : Optional[str] = field(
        default=A__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class __UpperCamelCase :
    """CLI arguments describing the CoNLL-formatted dataset to train on.

    NOTE(review): field names are mangled to `__A` (originally `data_dir`,
    `labels`, `max_seq_length`, `overwrite_cache`) and this class shadows
    the previous dataclass of the same mangled name. Left byte-identical.
    """

    __A : str = field(
        metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} )
    __A : Optional[str] = field(
        default=A__ , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , )
    __A : int = field(
        default=1_28 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    __A : bool = field(
        default=A__ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def A__ ( ) -> List[str]:
    """Train / evaluate / predict loop for token classification (NER, POS, ...).

    Parses ModelArguments / DataTrainingArguments / TrainingArguments from the
    command line (or a single JSON file), trains with `Trainer`, and returns
    the dict of evaluation results.

    NOTE(review): every assignment target in this function was mangled to
    `_UpperCAmelCase` while later statements read the intended names
    (`parser`, `model_args`, `trainer`, ...), and most call arguments were
    mangled to `SCREAMING_SNAKE_CASE_`. As written the function raises
    NameError on first use; the comments below describe the intended flow.
    """
    # Parse CLI args (or one .json file path) into the three dataclasses.
    _UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses()
    # Refuse to overwrite a non-empty output dir unless explicitly allowed.
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
            ''' --overwrite_output_dir to overcome.''' )
    # Resolve the TokenClassificationTask subclass named by --task_type.
    _UpperCAmelCase = import_module('''tasks''' )
    try:
        _UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , model_args.task_type )
        _UpperCAmelCase = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            F'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
            F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , SCREAMING_SNAKE_CASE_ )
    # Set seed
    set_seed(training_args.seed )
    # Prepare CONLL-2003 task: label list, id<->label maps.
    _UpperCAmelCase = token_classification_task.get_labels(data_args.labels )
    _UpperCAmelCase = dict(enumerate(SCREAMING_SNAKE_CASE_ ) )
    _UpperCAmelCase = len(SCREAMING_SNAKE_CASE_ )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    _UpperCAmelCase = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=SCREAMING_SNAKE_CASE_ , idalabel=SCREAMING_SNAKE_CASE_ , labelaid={label: i for i, label in enumerate(SCREAMING_SNAKE_CASE_ )} , cache_dir=model_args.cache_dir , )
    _UpperCAmelCase = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
    _UpperCAmelCase = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , )
    # Get datasets only for the phases that were requested.
    _UpperCAmelCase = (
        TokenClassificationDataset(
            token_classification_task=SCREAMING_SNAKE_CASE_ , data_dir=data_args.data_dir , tokenizer=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    _UpperCAmelCase = (
        TokenClassificationDataset(
            token_classification_task=SCREAMING_SNAKE_CASE_ , data_dir=data_args.data_dir , tokenizer=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )

    def align_predictions(SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : np.ndarray ) -> Tuple[List[int], List[int]]:
        # Map logits + label ids to per-sentence label-name lists, skipping
        # positions marked with the CrossEntropyLoss ignore index.
        _UpperCAmelCase = np.argmax(SCREAMING_SNAKE_CASE_ , axis=2 )
        _UpperCAmelCase , _UpperCAmelCase = preds.shape
        _UpperCAmelCase = [[] for _ in range(SCREAMING_SNAKE_CASE_ )]
        _UpperCAmelCase = [[] for _ in range(SCREAMING_SNAKE_CASE_ )]
        for i in range(SCREAMING_SNAKE_CASE_ ):
            for j in range(SCREAMING_SNAKE_CASE_ ):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        return preds_list, out_label_list

    def compute_metrics(SCREAMING_SNAKE_CASE_ : EvalPrediction ) -> Dict:
        # Entity-level metrics from seqeval.
        _UpperCAmelCase , _UpperCAmelCase = align_predictions(p.predictions , p.label_ids )
        return {
            "accuracy_score": accuracy_score(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ),
            "precision": precision_score(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ),
            "recall": recall_score(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ),
            "f1": fa_score(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ),
        }

    # Data collator: pad to a multiple of 8 for fp16 tensor cores.
    _UpperCAmelCase = DataCollatorWithPadding(SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=8 ) if training_args.fpaa else None
    # Initialize our Trainer
    _UpperCAmelCase = Trainer(
        model=SCREAMING_SNAKE_CASE_ , args=SCREAMING_SNAKE_CASE_ , train_dataset=SCREAMING_SNAKE_CASE_ , eval_dataset=SCREAMING_SNAKE_CASE_ , compute_metrics=SCREAMING_SNAKE_CASE_ , data_collator=SCREAMING_SNAKE_CASE_ , )
    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    _UpperCAmelCase = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        _UpperCAmelCase = trainer.evaluate()
        _UpperCAmelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        if trainer.is_world_process_zero():
            with open(SCREAMING_SNAKE_CASE_ , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key, value in result.items():
                    logger.info(''' %s = %s''' , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
                    writer.write('''%s = %s\n''' % (key, value) )
            results.update(SCREAMING_SNAKE_CASE_ )
    # Predict
    if training_args.do_predict:
        _UpperCAmelCase = TokenClassificationDataset(
            token_classification_task=SCREAMING_SNAKE_CASE_ , data_dir=data_args.data_dir , tokenizer=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
        _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = trainer.predict(SCREAMING_SNAKE_CASE_ )
        _UpperCAmelCase , _UpperCAmelCase = align_predictions(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        _UpperCAmelCase = os.path.join(training_args.output_dir , '''test_results.txt''' )
        if trainer.is_world_process_zero():
            with open(SCREAMING_SNAKE_CASE_ , '''w''' ) as writer:
                for key, value in metrics.items():
                    logger.info(''' %s = %s''' , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
                    writer.write('''%s = %s\n''' % (key, value) )
        # Save predictions
        _UpperCAmelCase = os.path.join(training_args.output_dir , '''test_predictions.txt''' )
        if trainer.is_world_process_zero():
            with open(SCREAMING_SNAKE_CASE_ , '''w''' ) as writer:
                with open(os.path.join(data_args.data_dir , '''test.txt''' ) , '''r''' ) as f:
                    token_classification_task.write_predictions_to_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    return results
def A__ ( SCREAMING_SNAKE_CASE_ : Dict ) -> Any:
    """Entry point for TPU multiprocessing (xla_spawn); the index arg is unused.

    NOTE(review): this redefinition shadows the training function `A__`
    above, and `main` is not defined anywhere in this file (the large
    function above was presumably named `main` before mangling), so both
    this wrapper and the `__main__` guard below raise NameError as written.
    """
    main()


if __name__ == "__main__":
    main()
| 32
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _lowerCAmelCase ( lowercase_ ):
    """Agent tool wrapping CLIPSeg for text-conditioned image segmentation.

    NOTE(review): the base class name `lowercase_` is undefined here
    (originally `PipelineTool`), the three processing methods all share the
    mangled name `__magic_name__` (originally encode/forward/decode), and
    several locals were mangled to `snake_case__` while the code reads the
    intended names (`logits`, `array`). Left byte-identical; flagged.
    """

    # Natural-language tool description shown to the agent.
    _lowercase : Optional[Any] = (
        '''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
        '''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
        '''describing the elements what should be identified in the segmentation mask. The tool returns the mask.'''
    )
    _lowercase : Dict = '''CIDAS/clipseg-rd64-refined'''  # default checkpoint
    _lowercase : List[Any] = '''image_segmenter'''  # tool name
    _lowercase : Tuple = CLIPSegForImageSegmentation  # model class
    _lowercase : str = ['''image''', '''text''']  # tool inputs
    _lowercase : Dict = ['''image''']  # tool outputs

    def __init__( self : Optional[int] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : List[Any]):
        """Require the vision backend (PIL) before base-tool initialization."""
        requires_backends(self , ["""vision"""])
        super().__init__(*UpperCamelCase__ , **UpperCamelCase__)

    def __magic_name__ ( self : str , UpperCamelCase__ : "Image" , UpperCamelCase__ : str):
        """Encode (image, label) into tensors; reads mangled names `label`/`image`."""
        return self.pre_processor(text=[label] , images=[image] , padding=UpperCamelCase__ , return_tensors="""pt""")

    def __magic_name__ ( self : Any , UpperCamelCase__ : Optional[Any]):
        """Run CLIPSeg without gradients and return the raw logits."""
        with torch.no_grad():
            snake_case__ = self.model(**UpperCamelCase__).logits
        return logits

    def __magic_name__ ( self : Optional[int] , UpperCamelCase__ : Union[str, Any]):
        """Convert the model output to a PIL mask image.

        NOTE(review): the two bare `snake_case__ = 0/1` assignments are
        mangled remnants of the original thresholding of `array` — confirm
        against the upstream tool before repairing.
        """
        snake_case__ = outputs.cpu().detach().numpy()
        snake_case__ = 0
        snake_case__ = 1
        return Image.fromarray((array * 2_5_5).astype(np.uinta))
| 654
| 0
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class A :
    """A singly linked node (payload + pointer) for the circular list below.

    Bug fixed: the original `__init__` assigned to bare mangled locals
    (`UpperCamelCase_ = data`) and read the undefined name `data`, so the
    instance was never populated; the payload is now stored on `self`.
    """

    def __init__( self : str , __UpperCAmelCase : Any ) -> None:
        self.data = __UpperCAmelCase  # arbitrary payload
        self.next = None  # successor node; linked in by the list
class A :
    """Circular singly linked list (mangled copy).

    NOTE(review): most statements assign to the bare mangled name
    `UpperCamelCase_` where the original stored into attributes/locals
    (`self.head = ...`, `node = ...`), several methods share the mangled
    name `lowercase__` (only the last definition survives), parameters are
    duplicated (`__UpperCAmelCase` twice in insert_nth), and `Node` is
    undefined (the node class above was also renamed to `A`). As written
    this class raises errors on use; comments give the intended behavior.
    Left byte-identical; flagged for repair.
    """

    def __init__( self : Tuple ) -> None:
        # Intended: self.head = None; self.tail = None
        UpperCamelCase_ = None
        UpperCamelCase_ = None

    def __iter__( self : Dict ) -> Iterator[Any]:
        # Intended: walk the ring from head, stopping after one full loop.
        UpperCamelCase_ = self.head
        while self.head:
            yield node.data
            UpperCamelCase_ = node.next
            if node == self.head:
                break

    def __len__( self : Optional[Any] ) -> int:
        # Count nodes by iterating once around the ring.
        return sum(1 for _ in self )

    def __repr__( self : str ) -> Any:
        # Render as "a->b->c".
        return "->".join(str(__UpperCAmelCase ) for item in iter(self ) )

    def lowercase__ ( self : Dict , __UpperCAmelCase : Any ) -> None:
        # Intended name: insert_tail(value)
        self.insert_nth(len(self ) , __UpperCAmelCase )

    def lowercase__ ( self : int , __UpperCAmelCase : Any ) -> None:
        # Intended name: insert_head(value)
        self.insert_nth(0 , __UpperCAmelCase )

    def lowercase__ ( self : List[Any] , __UpperCAmelCase : int , __UpperCAmelCase : Any ) -> None:
        # Intended name: insert_nth(index, value)
        if index < 0 or index > len(self ):
            raise IndexError('list index out of range.' )
        UpperCamelCase_ = Node(__UpperCAmelCase )
        if self.head is None:
            UpperCamelCase_ = new_node  # first node points itself
            UpperCamelCase_ = UpperCamelCase_ = new_node
        elif index == 0:  # insert at head
            UpperCamelCase_ = self.head
            UpperCamelCase_ = UpperCamelCase_ = new_node
        else:
            UpperCamelCase_ = self.head
            for _ in range(index - 1 ):
                UpperCamelCase_ = temp.next
            UpperCamelCase_ = temp.next
            UpperCamelCase_ = new_node
            if index == len(self ) - 1:  # insert at tail
                UpperCamelCase_ = new_node

    def lowercase__ ( self : int ) -> List[str]:
        # Intended name: delete_front()
        return self.delete_nth(0 )

    def lowercase__ ( self : Dict ) -> Any:
        # Intended name: delete_tail()
        return self.delete_nth(len(self ) - 1 )

    def lowercase__ ( self : Any , __UpperCAmelCase : int = 0 ) -> Any:
        # Intended name: delete_nth(index) -> removed payload
        if not 0 <= index < len(self ):
            raise IndexError('list index out of range.' )
        UpperCamelCase_ = self.head
        if self.head == self.tail:  # just one node
            UpperCamelCase_ = UpperCamelCase_ = None
        elif index == 0:  # delete head node
            UpperCamelCase_ = self.tail.next.next
            UpperCamelCase_ = self.head.next
        else:
            UpperCamelCase_ = self.head
            for _ in range(index - 1 ):
                UpperCamelCase_ = temp.next
            UpperCamelCase_ = temp.next
            UpperCamelCase_ = temp.next.next
            if index == len(self ) - 1:  # delete at tail
                UpperCamelCase_ = temp
        return delete_node.data

    def lowercase__ ( self : List[str] ) -> bool:
        # Intended name: is_empty()
        return len(self ) == 0
def a_ ( ) -> None:
    """Exercise the circular linked list end to end (insert/delete/len/str).

    NOTE(review): `CircularLinkedList` and `__snake_case` are undefined in
    this file (the class above was renamed to `A` and the local references
    were mangled), so this self-test raises NameError as written. Left
    byte-identical; flagged for repair.
    """
    UpperCamelCase_ = CircularLinkedList()
    assert len(__snake_case ) == 0
    assert circular_linked_list.is_empty() is True
    assert str(__snake_case ) == ""
    # Deleting from an empty list must raise IndexError in every form.
    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_nth(-1 )
        raise AssertionError
    except IndexError:
        assert True
    try:
        circular_linked_list.delete_nth(0 )
        raise AssertionError
    except IndexError:
        assert True
    assert circular_linked_list.is_empty() is True
    # Fill 1..5, then exercise head/tail insertion and deletion.
    for i in range(5 ):
        assert len(__snake_case ) == i
        circular_linked_list.insert_nth(__snake_case , i + 1 )
    assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(1 , 6 ) )
    circular_linked_list.insert_tail(6 )
    assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(1 , 7 ) )
    circular_linked_list.insert_head(0 )
    assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(0 , 7 ) )
    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(1 , 6 ) )
    assert circular_linked_list.delete_nth(2 ) == 3
    circular_linked_list.insert_nth(2 , 3 )
    assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(1 , 6 ) )
    assert circular_linked_list.is_empty() is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 716
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def a_(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine") -> torch.Tensor:
    """Create a beta schedule that discretizes an alpha_t_bar function.

    Builds betas so that the cumulative product of (1 - beta) follows the
    chosen ``alpha_bar`` curve over ``t in [0, 1]`` (Nichol & Dhariwal's
    "squaredcos_cap_v2" / Glide cosine schedule, or an exponential decay).

    Args:
        num_diffusion_timesteps: number of betas to produce.
        max_beta: clamp for each beta; keeping it below 1 avoids
            singularities at the end of the schedule.
        alpha_transform_type: "cosine" (default) or "exp".

    Returns:
        A float32 tensor of shape ``(num_diffusion_timesteps,)``.

    Raises:
        ValueError: for an unsupported ``alpha_transform_type``.

    Bugs fixed: the original mangled all three parameters to one duplicate
    name (a SyntaxError), assigned locals to `UpperCamelCase_` while reading
    the real names, and used the mangled dtype `torch.floataa` (float32).
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class A(SchedulerMixin, ConfigMixin):
    """
    Heun's second-order discrete scheduler for the Karras et al. (2022)
    diffusion ODE (Algorithm 2 of https://arxiv.org/abs/2206.00364).

    Method names are grounded in the internal call sites of the original body
    (``self.index_for_timestep``, ``self.set_timesteps``, ``self._sigma_to_t``,
    ``self._convert_to_karras``, ``self.state_in_first_order``).
    """

    # Schedulers this implementation is config-compatible with.
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    # Heun is a second-order solver: each timestep needs two model evaluations.
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00_085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ) -> None:
        """Build the beta/alpha tables and initialize the timestep schedule."""
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule; a_ is this module's betas_for_alpha_bar helper.
            self.betas = a_(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = a_(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        """Map a timestep value to its index in the (possibly duplicated) schedule."""
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        """Standard deviation of the initial noise distribution."""
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        """Scale the input by 1 / sqrt(sigma^2 + 1) so the model sees unit variance."""
        step_index = self.index_for_timestep(timestep)
        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ) -> None:
        """Compute the discrete timestep/sigma schedule used for sampling."""
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        # Every interior sigma appears twice: once per Heun sub-step.
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative (puts the scheduler in "first order mode")
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)

    def _sigma_to_t(self, sigma, log_sigmas):
        """Invert the sigma schedule: interpolate a (fractional) timestep for a sigma."""
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t

    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
        """Construct the noise schedule of Karras et al. (2022), eq. (5)."""
        sigma_min = in_sigmas[-1].item()
        sigma_max = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def state_in_first_order(self):
        """True while waiting for the first (Euler) half of a Heun step."""
        return self.dt is None

    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        """Advance the sample by one (half) Heun step."""
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method: average the two slope estimates
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        """Diffuse clean samples to the noise level of the given timesteps."""
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
| 559
| 0
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE(length: int) -> list[int]:
    """
    Return the first `length` hexagonal numbers, h(n) = n * (2n - 1).

    Raises:
        ValueError: if `length` is not a positive integer.
    """
    # Check the type first: comparing a non-int with 0 could raise TypeError.
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(SCREAMING_SNAKE_CASE(length=5))
    print(SCREAMING_SNAKE_CASE(length=10))
| 159
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import structure for the SqueezeBERT model: map each submodule to the
# public names it exports; optional backends only add their entries when the
# corresponding dependency is available.
_import_structure = {
    "configuration_squeezebert": [
        "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SqueezeBertConfig",
        "SqueezeBertOnnxConfig",
    ],
    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_squeezebert"] = [
        "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SqueezeBertForMaskedLM",
        "SqueezeBertForMultipleChoice",
        "SqueezeBertForQuestionAnswering",
        "SqueezeBertForSequenceClassification",
        "SqueezeBertForTokenClassification",
        "SqueezeBertModel",
        "SqueezeBertModule",
        "SqueezeBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers only.
    from .configuration_squeezebert import (
        SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SqueezeBertConfig,
        SqueezeBertOnnxConfig,
    )
    from .tokenization_squeezebert import SqueezeBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_squeezebert import (
            SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
            SqueezeBertModel,
            SqueezeBertModule,
            SqueezeBertPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 159
| 1
|
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# Names of the on-disk vocabulary files expected by the tokenizer below
# (it reads VOCAB_FILES_NAMES in save_vocabulary).
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}

# Download locations of the pretrained vocabularies, keyed by checkpoint name.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}

# Maximum input lengths (in tokens) for the pretrained checkpoints.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}
def _UpperCamelCase ( lowerCAmelCase_ ) ->Optional[int]:
UpperCAmelCase = set()
UpperCAmelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCAmelCase = char
UpperCAmelCase = set(_lowerCAmelCase )
return pairs
class __lowercase(PreTrainedTokenizer):
    """
    PhoBERT tokenizer: BPE over pre-word-segmented Vietnamese text, using a
    fairseq-style vocabulary (`vocab.txt`) and merge codes (`bpe.codes`).
    Method names follow the `PreTrainedTokenizer` hooks they implement.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.merges_file = merges_file

        # The four special tokens occupy the first ids, matching fairseq.
        self.encoder = {}
        self.encoder[self.bos_token] = 0
        self.encoder[self.pad_token] = 1
        self.encoder[self.eos_token] = 2
        self.encoder[self.unk_token] = 3

        self.add_from_file(vocab_file)

        self.decoder = {v: k for k, v in self.encoder.items()}

        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        # Drop the trailing frequency column of each merge rule.
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_pair=None):
        """Add `<s> ... </s>` (single) or `<s> A </s></s> B </s>` (pair) framing."""
        if token_ids_a_pair is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a_pair + sep

    def get_special_tokens_mask(self, token_ids_a, token_ids_a_pair=None, already_has_special_tokens=False):
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_a_pair, already_has_special_tokens=True
            )
        if token_ids_a_pair is None:
            return [1] + ([0] * len(token_ids_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1, 1] + ([0] * len(token_ids_a_pair)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_a_pair=None):
        """PhoBERT does not use token types, so everything is segment 0."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_pair + sep) * [0]

    @property
    def vocab_size(self):
        """Number of entries in the base vocabulary."""
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply the learned merge rules to one whitespace-delimited token."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        # Mark the final character with the end-of-word suffix.
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = _UpperCamelCase(word)  # module's get_pairs helper

        if not pairs:
            return token

        while True:
            # Greedily merge the lowest-ranked (earliest learned) pair.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = _UpperCamelCase(word)
        # Join sub-units with the continuation marker and drop the trailing "</w>".
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split on whitespace, then BPE-encode each token."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Map a token string to its id, falling back to the unk id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Map an id back to its token string, falling back to the unk token."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Undo BPE: drop continuation markers and rejoin with spaces."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the vocab and merges files into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)

        return out_vocab_file, out_merge_file

    def add_from_file(self, f):
        """
        Load a fairseq-style dictionary ("<token> <count>" per line) into the
        encoder. `f` may be a path (opened recursively) or a file object.
        """
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            # Assign the next free id to each new token.
            self.encoder[word] = len(self.encoder)
| 711
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the I-BERT model: submodule -> exported names.
_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ibert"] = [
        "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "IBertForMaskedLM",
        "IBertForMultipleChoice",
        "IBertForQuestionAnswering",
        "IBertForSequenceClassification",
        "IBertForTokenClassification",
        "IBertModel",
        "IBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers only.
    from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ibert import (
            IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            IBertForMaskedLM,
            IBertForMultipleChoice,
            IBertForQuestionAnswering,
            IBertForSequenceClassification,
            IBertForTokenClassification,
            IBertModel,
            IBertPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 627
| 0
|
def lowerCAmelCase_(a, b):
    """
    Return True if `b` can be obtained from `a` by uppercasing some lowercase
    letters of `a` and deleting all remaining lowercase letters
    (the "Abbreviation" problem).

    Args:
        a: source string (may mix cases).
        b: target string (uppercase).
    """
    n = len(a)
    m = len(b)
    # dp[i][j]: first i chars of `a` can produce first j chars of `b`.
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                # Uppercase (or match) a[i] against b[j].
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # Delete a lowercase a[i].
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 81
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import structure for the CTRL model: submodule -> exported names;
# the torch and TF backends each add their entries only when available.
_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ctrl"] = [
        "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CTRLForSequenceClassification",
        "CTRLLMHeadModel",
        "CTRLModel",
        "CTRLPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_ctrl"] = [
        "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCTRLForSequenceClassification",
        "TFCTRLLMHeadModel",
        "TFCTRLModel",
        "TFCTRLPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers only.
    from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
    from .tokenization_ctrl import CTRLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ctrl import (
            CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            CTRLForSequenceClassification,
            CTRLLMHeadModel,
            CTRLModel,
            CTRLPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_ctrl import (
            TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCTRLForSequenceClassification,
            TFCTRLLMHeadModel,
            TFCTRLModel,
            TFCTRLPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 237
| 0
|
# Argument-name sets shared by pipeline tests: for each pipeline family, the
# full set of accepted call arguments ("*_PARAMS"), the subset that is batched
# ("*_BATCH_PARAMS"), and the image-typed arguments where relevant.
TEXT_TO_IMAGE_PARAMS = frozenset(
    [
        "prompt",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])

IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(["image"])

IMAGE_VARIATION_PARAMS = frozenset(
    [
        "image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_VARIATION_BATCH_PARAMS = frozenset(["image"])

TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
    [
        "prompt",
        "image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(["prompt", "image", "negative_prompt"])

TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # Text guided image variation with an image mask
        "prompt",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["prompt", "image", "mask_image", "negative_prompt"])

IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # image variation with an image mask
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["image", "mask_image"])

IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        "example_image",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["example_image", "image", "mask_image"])

CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(["class_labels"])

CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(["class_labels"])

UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])

UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])

TEXT_TO_AUDIO_PARAMS = frozenset(
    [
        "prompt",
        "audio_length_in_s",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(["input_tokens"])

TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(["input_tokens"])
| 676
|
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()


@unittest.skip("Temporarily disable the doc tests.")
@require_torch
@require_tf
@slow
class __lowercase(unittest.TestCase):
    """Run the doctests embedded in the transformers source and docs."""

    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ) -> None:
        """
        Run doctests on every file in `directory` whose name contains
        `identifier` (and does not contain `n_identifier` / appear in
        `ignore_files`). With `only_modules`, files are resolved as
        `transformers` submodules; otherwise `doctest.testfile` is used.
        """
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                # `directory` is relative to the repo root; tests run one level down.
                result = doctest.testfile(str(Path("..") / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_files(self):
        directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization_files(self):
        directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(directory, identifier=identifier)

    def test_configuration_files(self):
        directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(directory, identifier=identifier)

    def test_remaining_files(self):
        directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(directory, n_identifier=n_identifiers)

    def test_doc_files(self):
        directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False)
| 676
| 1
|
def UpperCAmelCase_(m):
    """
    Return the number of integer partitions of `m` via bottom-up DP.

    memo[n][k] counts partitions of n whose parts are all <= k + 1.
    """
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        # One way to partition any n using only parts of size 1.
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            # Either never use a part of size k + 1 ...
            memo[n][k] += memo[n][k - 1]
            # ... or use at least one part of size k + 1.
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(UpperCAmelCase_(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(UpperCAmelCase_(n))
        except ValueError:
            print("Please pass a number.")
| 84
|
import math
def prime_sieve(n):
    """
    Return all primes strictly below `n` (n >= 3) using an odd-only
    Eratosthenes sieve.
    """
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        # Cross out odd multiples of i starting at 3 * i.
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit=9999_6666_3333):
    """
    Sum, over each pair of consecutive primes (p, q) with p^2 <= limit, the
    numbers in (p^2, q^2] (capped at `limit`) divisible by exactly one of
    p or q.
    """
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
| 84
| 1
|
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class snake_case :
    """Directed, weighted graph stored as an adjacency dict.

    The intended representation is ``{vertex: [[weight, neighbour], ...]}``.

    NOTE(review): this source was machine-mangled — every assignment target
    was rewritten to ``_A`` and most argument references to ``A__``, and all
    methods share the name ``a_`` (later definitions shadow earlier ones).
    The bodies below are therefore not runnable as-is; comments describe the
    apparent intent.  Confirm against the original before fixing.
    """

    def __init__( self : List[str] ) -> Tuple:
        """Create an empty graph (presumably ``self.graph = {}``)."""
        _A = {}

    def a_ ( self : Union[str, Any] , a__ : Dict , a__ : Optional[int] , a__ : Union[str, Any]=1 ) -> Union[str, Any]:
        """Add a directed edge u -> v with weight w (args mangled; duplicates are a SyntaxError)."""
        if self.graph.get(A__ ):
            if self.graph[u].count([w, v] ) == 0:
                self.graph[u].append([w, v] )
        else:
            _A = [[w, v]]
        if not self.graph.get(A__ ):
            _A = []

    def a_ ( self : Union[str, Any] ) -> List[str]:
        """Return all vertices of the graph."""
        return list(self.graph )

    def a_ ( self : Tuple , a__ : List[Any] , a__ : Tuple ) -> Optional[int]:
        """Remove the directed edge u -> v if present.

        NOTE(review): removing from ``self.graph[u]`` while iterating it is a
        modify-while-iterating bug in the original as well.
        """
        if self.graph.get(A__ ):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(A__ )

    def a_ ( self : Tuple , a__ : Tuple=-2 , a__ : int=-1 ) -> Union[str, Any]:
        """Iterative depth-first search from s; stops early when d is found.

        s == -2 means "start at the first vertex"; d == -1 means "no target".
        Returns the list of visited vertices in visit order.
        """
        if s == d:
            return []
        _A = []
        _A = []
        if s == -2:
            _A = list(self.graph )[0]
        stack.append(A__ )
        visited.append(A__ )
        _A = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                _A = s
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        if node[1] == d:
                            visited.append(A__ )
                            return visited
                        else:
                            stack.append(node[1] )
                            visited.append(node[1] )
                            _A = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(A__ ) != 0:
                    _A = stack[len(A__ ) - 1]
            else:
                _A = ss
            # check if se have reached the starting point
            if len(A__ ) == 0:
                return visited

    def a_ ( self : str , a__ : Dict=-1 ) -> Tuple:
        """Populate the graph with random edges; c == -1 picks a random vertex count."""
        if c == -1:
            _A = floor(random() * 1_00_00 ) + 10
        for i in range(A__ ):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 1_02 ) + 1 ):
                _A = floor(random() * c ) + 1
                if n != i:
                    self.add_pair(A__ , A__ , 1 )

    def a_ ( self : Any , a__ : Tuple=-2 ) -> int:
        """Iterative breadth-first search from s; returns vertices in visit order."""
        _A = deque()
        _A = []
        if s == -2:
            _A = list(self.graph )[0]
        d.append(A__ )
        visited.append(A__ )
        while d:
            _A = d.popleft()
            if len(self.graph[s] ) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        d.append(node[1] )
                        visited.append(node[1] )
        return visited

    def a_ ( self : Dict , a__ : Union[str, Any] ) -> Optional[Any]:
        """Return the in-degree of vertex u (count of edges ending at u)."""
        _A = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def a_ ( self : List[Any] , a__ : List[Any] ) -> List[Any]:
        """Return the out-degree of vertex u."""
        return len(self.graph[u] )

    def a_ ( self : int , a__ : Tuple=-2 ) -> Any:
        """DFS-based topological sort starting at s; returns nodes in pop order."""
        _A = []
        _A = []
        if s == -2:
            _A = list(self.graph )[0]
        stack.append(A__ )
        visited.append(A__ )
        _A = s
        _A = []
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                _A = s
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        _A = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop() )
                if len(A__ ) != 0:
                    _A = stack[len(A__ ) - 1]
            else:
                _A = ss
            # check if se have reached the starting point
            if len(A__ ) == 0:
                return sorted_nodes

    def a_ ( self : Any ) -> Optional[Any]:
        """Collect nodes that take part in a cycle (DFS with back-edge tracking)."""
        _A = []
        _A = []
        _A = list(self.graph )[0]
        stack.append(A__ )
        visited.append(A__ )
        _A = -2
        _A = []
        _A = s
        _A = False
        _A = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                _A = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1] ) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1] ) > 0
                        and not on_the_way_back
                    ):
                        _A = len(A__ ) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1] )
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack] )
                                len_stack -= 1
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        _A = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                _A = True
                if len(A__ ) != 0:
                    _A = stack[len(A__ ) - 1]
            else:
                _A = False
                indirect_parents.append(A__ )
                _A = s
                _A = ss
            # check if se have reached the starting point
            if len(A__ ) == 0:
                return list(A__ )

    def a_ ( self : Any ) -> Any:
        """Return True as soon as a back edge (cycle) is detected, else False."""
        _A = []
        _A = []
        _A = list(self.graph )[0]
        stack.append(A__ )
        visited.append(A__ )
        _A = -2
        _A = []
        _A = s
        _A = False
        _A = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                _A = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1] ) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1] ) > 0
                        and not on_the_way_back
                    ):
                        _A = len(A__ ) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1] )
                                break
                            else:
                                return True
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        _A = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                _A = True
                if len(A__ ) != 0:
                    _A = stack[len(A__ ) - 1]
            else:
                _A = False
                indirect_parents.append(A__ )
                _A = s
                _A = ss
            # check if se have reached the starting point
            if len(A__ ) == 0:
                return False

    def a_ ( self : List[str] , a__ : Optional[int]=-2 , a__ : Union[str, Any]=-1 ) -> Optional[int]:
        """Return the wall-clock time taken by a DFS from s (optionally to d)."""
        _A = time()
        self.dfs(A__ , A__ )
        _A = time()
        return end - begin

    def a_ ( self : Dict , a__ : Optional[Any]=-2 ) -> List[str]:
        """Return the wall-clock time taken by a BFS from s."""
        _A = time()
        self.bfs(A__ )
        _A = time()
        return end - begin
class snake_case :
    """Undirected, weighted graph stored as an adjacency dict.

    Each edge is stored twice, once per endpoint, as ``[weight, neighbour]``.

    NOTE(review): machine-mangled like the class above — ``_A`` assignment
    targets, ``A__`` argument references, and every method named ``a_``.
    Comments describe the apparent intent only.
    """

    def __init__( self : Dict ) -> List[Any]:
        """Create an empty graph (presumably ``self.graph = {}``)."""
        _A = {}

    def a_ ( self : Optional[int] , a__ : str , a__ : int , a__ : Tuple=1 ) -> Dict:
        """Add an undirected edge u <-> v with weight w (stored in both directions)."""
        if self.graph.get(A__ ):
            # if there already is a edge
            if self.graph[u].count([w, v] ) == 0:
                self.graph[u].append([w, v] )
        else:
            # if u does not exist
            _A = [[w, v]]
        # add the other way
        if self.graph.get(A__ ):
            # if there already is a edge
            if self.graph[v].count([w, u] ) == 0:
                self.graph[v].append([w, u] )
        else:
            # if u does not exist
            _A = [[w, u]]

    def a_ ( self : int , a__ : Optional[int] , a__ : Dict ) -> Tuple:
        """Remove the edge u <-> v from both adjacency lists.

        NOTE(review): removes while iterating the same list — a bug retained
        from the original source.
        """
        if self.graph.get(A__ ):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(A__ )
        # the other way round
        if self.graph.get(A__ ):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(A__ )

    def a_ ( self : Tuple , a__ : List[str]=-2 , a__ : List[Any]=-1 ) -> Optional[Any]:
        """Iterative DFS from s, stopping early if target d is reached."""
        if s == d:
            return []
        _A = []
        _A = []
        if s == -2:
            _A = list(self.graph )[0]
        stack.append(A__ )
        visited.append(A__ )
        _A = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                _A = s
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        if node[1] == d:
                            visited.append(A__ )
                            return visited
                        else:
                            stack.append(node[1] )
                            visited.append(node[1] )
                            _A = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(A__ ) != 0:
                    _A = stack[len(A__ ) - 1]
            else:
                _A = ss
            # check if se have reached the starting point
            if len(A__ ) == 0:
                return visited

    def a_ ( self : str , a__ : Optional[Any]=-1 ) -> List[str]:
        """Populate the graph with random edges; c == -1 picks a random vertex count."""
        if c == -1:
            _A = floor(random() * 1_00_00 ) + 10
        for i in range(A__ ):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 1_02 ) + 1 ):
                _A = floor(random() * c ) + 1
                if n != i:
                    self.add_pair(A__ , A__ , 1 )

    def a_ ( self : Union[str, Any] , a__ : List[Any]=-2 ) -> Optional[Any]:
        """Iterative BFS from s; returns vertices in visit order."""
        _A = deque()
        _A = []
        if s == -2:
            _A = list(self.graph )[0]
        d.append(A__ )
        visited.append(A__ )
        while d:
            _A = d.popleft()
            if len(self.graph[s] ) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        d.append(node[1] )
                        visited.append(node[1] )
        return visited

    def a_ ( self : int , a__ : List[Any] ) -> str:
        """Return the degree of vertex u."""
        return len(self.graph[u] )

    def a_ ( self : Optional[int] ) -> int:
        """Collect nodes that take part in a cycle (DFS with back-edge tracking)."""
        _A = []
        _A = []
        _A = list(self.graph )[0]
        stack.append(A__ )
        visited.append(A__ )
        _A = -2
        _A = []
        _A = s
        _A = False
        _A = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                _A = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1] ) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1] ) > 0
                        and not on_the_way_back
                    ):
                        _A = len(A__ ) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1] )
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack] )
                                len_stack -= 1
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        _A = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                _A = True
                if len(A__ ) != 0:
                    _A = stack[len(A__ ) - 1]
            else:
                _A = False
                indirect_parents.append(A__ )
                _A = s
                _A = ss
            # check if se have reached the starting point
            if len(A__ ) == 0:
                return list(A__ )

    def a_ ( self : Optional[int] ) -> Union[str, Any]:
        """Return True as soon as a back edge (cycle) is detected, else False."""
        _A = []
        _A = []
        _A = list(self.graph )[0]
        stack.append(A__ )
        visited.append(A__ )
        _A = -2
        _A = []
        _A = s
        _A = False
        _A = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                _A = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1] ) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1] ) > 0
                        and not on_the_way_back
                    ):
                        _A = len(A__ ) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1] )
                                break
                            else:
                                return True
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        _A = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                _A = True
                if len(A__ ) != 0:
                    _A = stack[len(A__ ) - 1]
            else:
                _A = False
                indirect_parents.append(A__ )
                _A = s
                _A = ss
            # check if se have reached the starting point
            if len(A__ ) == 0:
                return False

    def a_ ( self : List[str] ) -> Any:
        """Return all vertices of the graph."""
        return list(self.graph )

    def a_ ( self : int , a__ : Optional[Any]=-2 , a__ : Any=-1 ) -> List[str]:
        """Return the wall-clock time taken by a DFS from s (optionally to d)."""
        _A = time()
        self.dfs(A__ , A__ )
        _A = time()
        return end - begin

    def a_ ( self : List[str] , a__ : Dict=-2 ) -> int:
        """Return the wall-clock time taken by a BFS from s."""
        _A = time()
        self.bfs(A__ )
        _A = time()
        return end - begin
| 700
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure for the UperNet model, following the standard
# transformers `_LazyModule` pattern.
#
# Fix: the mangled original bound the structure to `a_` but later referenced
# `_import_structure` (NameError at import time) and assigned the lazy module
# to a dead variable instead of `sys.modules[__name__]`.
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only the configuration objects.
    pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
    # At runtime, replace this module with a lazy proxy that imports
    # submodules on first attribute access.
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 621
| 0
|
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class UpperCamelCase_ ( UpperCamelCase__ ):
    """fsspec filesystem that exposes one compressed file as a one-file archive.

    NOTE(review): mangled source — the four class attributes below were all
    renamed to the same identifier (upstream: ``root_marker``, ``protocol``,
    ``compression``, ``extension``), method parameters are duplicated (a
    SyntaxError), and assignment targets were rewritten to
    ``SCREAMING_SNAKE_CASE__``.  Confirm against the original before use.
    """

    lowerCamelCase_ = ""
    lowerCamelCase_ = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    lowerCamelCase_ = None  # compression type in fsspec. ex: "gzip"
    lowerCamelCase_ = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__( self :Optional[int] , __A :str = "" , __A :Optional[str] = None , __A :Optional[dict] = None , **__A :List[str] ) -> Any:
        """Open the compressed file lazily via fsspec (apparently fo/target_protocol/target_options)."""
        super().__init__(self , **__A )
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        SCREAMING_SNAKE_CASE__ = fsspec.open(
            __A , mode="""rb""" , protocol=__A , compression=self.compression , client_kwargs={
                """requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459
                """trust_env""": True, # Enable reading proxy env variables.
                **(target_options or {}).pop("""client_kwargs""" , {} ), # To avoid issues if it was already passed.
            } , **(target_options or {}) , )
        SCREAMING_SNAKE_CASE__ = os.path.basename(self.file.path.split("""::""" )[0] )
        # uncompressed name = compressed name minus its (last) extension
        SCREAMING_SNAKE_CASE__ = (
            self.compressed_name[: self.compressed_name.rindex(""".""" )]
            if """.""" in self.compressed_name
            else self.compressed_name
        )
        SCREAMING_SNAKE_CASE__ = None

    @classmethod
    def _snake_case ( cls :Any , __A :Tuple ) -> List[str]:
        """Strip the protocol and the leading "/" added by AbstractArchiveFileSystem."""
        return super()._strip_protocol(__A ).lstrip("""/""" )

    def _snake_case ( self :Union[str, Any] ) -> Tuple:
        """Build the single-entry directory cache on first use."""
        if self.dir_cache is None:
            SCREAMING_SNAKE_CASE__ = {**self.file.fs.info(self.file.path ), """name""": self.uncompressed_name}
            SCREAMING_SNAKE_CASE__ = {f["""name"""]: f}

    def _snake_case ( self :Optional[int] , __A :str ) -> str:
        """Return the whole decompressed file content."""
        return self.file.open().read()

    def _snake_case ( self :List[str] , __A :str , __A :str = "rb" , __A :int=None , __A :List[str]=True , __A :Optional[Any]=None , **__A :Union[str, Any] , ) -> Any:
        """Open the (only) file; only binary read mode is supported."""
        SCREAMING_SNAKE_CASE__ = self._strip_protocol(__A )
        if mode != "rb":
            raise ValueError(f'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
        return self.file.open()
class UpperCamelCase_ ( UpperCamelCase__ ):
    # bzip2-compressed single-file filesystem: protocol/compression "bz2", strips ".bz2".
    lowerCamelCase_ = "bz2"
    lowerCamelCase_ = "bz2"
    lowerCamelCase_ = ".bz2"
class UpperCamelCase_ ( UpperCamelCase__ ):
    # gzip-compressed single-file filesystem: protocol/compression "gzip", strips ".gz".
    lowerCamelCase_ = "gzip"
    lowerCamelCase_ = "gzip"
    lowerCamelCase_ = ".gz"
class UpperCamelCase_ ( UpperCamelCase__ ):
    # lz4-compressed single-file filesystem: protocol/compression "lz4", strips ".lz4".
    lowerCamelCase_ = "lz4"
    lowerCamelCase_ = "lz4"
    lowerCamelCase_ = ".lz4"
class UpperCamelCase_ ( UpperCamelCase__ ):
    # xz/LZMA-compressed single-file filesystem: protocol/compression "xz", strips ".xz".
    lowerCamelCase_ = "xz"
    lowerCamelCase_ = "xz"
    lowerCamelCase_ = ".xz"
class UpperCamelCase_ ( UpperCamelCase__ ):
    # zstandard-compressed single-file filesystem: protocol/compression "zstd", strips ".zst".
    lowerCamelCase_ = "zstd"
    lowerCamelCase_ = "zstd"
    lowerCamelCase_ = ".zst"

    def __init__( self :List[Any] , __A :str , __A :str = "rb" , __A :Optional[str] = None , __A :Optional[dict] = None , __A :int = DEFAULT_BLOCK_SIZE , **__A :Optional[int] , ) -> Union[str, Any]:
        """Open the zstd file, then patch its ``__enter__`` (see long note below).

        NOTE(review): mangled — duplicate ``__A`` parameters are a SyntaxError
        and the nested class/``fixed_enter`` reference names (``file_``,
        ``_enter``, ``WrappedFile``) that the mangling renamed away.
        """
        super().__init__(
            fo=__A , mode=__A , target_protocol=__A , target_options=__A , block_size=__A , **__A , )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        SCREAMING_SNAKE_CASE__ = self.file.__enter__

        class UpperCamelCase_ :
            # Proxy that forwards everything to the wrapped file object but
            # owns its own context-manager protocol.
            def __init__( self :int , __A :Tuple ) -> int:
                """Wrap the underlying file object."""
                SCREAMING_SNAKE_CASE__ = file_

            def __enter__( self :Optional[Any] ) -> Optional[int]:
                """Enter the wrapped file's context, but return the proxy."""
                self._file.__enter__()
                return self

            def __exit__( self :Optional[Any] , *__A :List[Any] , **__A :int ) -> Tuple:
                """Delegate context exit to the wrapped file."""
                self._file.__exit__(*__A , **__A )

            def __iter__( self :Any ) -> Optional[int]:
                """Iterate the wrapped file's lines."""
                return iter(self._file )

            def _snake_case ( self :Dict ) -> Dict:
                """Return the next line of the wrapped file."""
                return next(self._file )

            def __getattr__( self :Union[str, Any] , __A :List[str] ) -> Optional[Any]:
                """Forward any other attribute access to the wrapped file."""
                return getattr(self._file , __A )

        def fixed_enter(*__A :List[Any] , **__A :Optional[int] ):
            # Replacement __enter__ returning the wrapper instead of the raw reader.
            return WrappedFile(_enter(*__A , **__A ) )

        SCREAMING_SNAKE_CASE__ = fixed_enter
| 6
|
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def SCREAMING_SNAKE_CASE__ ( model_a , model_b , did_step , iteration ):
    """Assert that the gradients of two models are (or are not) in sync.

    Fix: the mangled signature used the same name for all four parameters
    (a SyntaxError); the body already referenced ``model_a``, ``model_b``,
    ``did_step`` and ``iteration``, so those names are restored.

    Args:
        model_a / model_b: models whose parameter gradients are compared pairwise.
        did_step: True -> grads must match; False -> grads must differ.
        iteration: loop index, used only in the assertion message.
    """
    for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
        # Parameters without gradients carry no signal either way.
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad , grad_param.grad ) is False
            ), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad , grad_param.grad ) is True
            ), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def SCREAMING_SNAKE_CASE__ ( model , input , target , accelerator , do_backward=True ):
    """Run one forward/backward step of MSE training.

    Fix: the mangled signature repeated one parameter name five times
    (a SyntaxError); names are restored from the body's own references.

    Args:
        model: module mapping ``input`` to a prediction.
        input / target: batch tensors; ``target`` is moved to the output device.
        accelerator: provides ``gradient_accumulation_steps`` and ``backward``.
        do_backward: True -> delegate to ``accelerator.backward``; False ->
            scale the loss by the accumulation steps and call ``loss.backward()``.
            NOTE(review): the mangled default reads ``True`` here, while the
            upstream accelerate test uses ``False`` — confirm before relying on it.
    """
    model.train()
    output = model(input )
    loss = F.mse_loss(output , target.to(output.device ) )
    if not do_backward:
        # Manual accumulation: average the loss over the accumulation window.
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss )
def SCREAMING_SNAKE_CASE__ ( accelerator , sched=False ):
    """Build a seeded regression training setup (plain model + prepared copy).

    Fix: the mangled signature duplicated its parameter name (a SyntaxError)
    and collapsed every local into one identifier; locals are restored from
    the names the body referenced (``model``, ``ddp_model``, ``dataloader``,
    ``opt``, ``ddp_opt``, ``ddp_sched``).

    Returns:
        ``(model, ddp_model, dataloader)`` when ``sched`` is falsy, otherwise
        ``(model, opt, scheduler, dataloader, ddp_model, ddp_opt, ddp_sched)``.
    """
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=80 )
    dataloader = DataLoader(dset , batch_size=16 )
    model.to(accelerator.device )
    if sched:
        opt = AdamW(params=model.parameters() , lr=1e-3 )
        ddp_opt = AdamW(params=ddp_model.parameters() , lr=1e-3 )
        scheduler = LambdaLR(opt , lr_lambda=lambda epoch : epoch**0.65 )
        ddp_sched = LambdaLR(ddp_opt , lr_lambda=lambda epoch : epoch**0.65 )
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model , ddp_opt , ddp_sched , dataloader )
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model , dataloader )
    if sched:
        return (model, opt, scheduler, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple ):
    """Verify that `accelerator.no_sync` is a no-op on a single device.

    NOTE(review): mangled — the parameter (presumably the accelerator) and
    all tuple-unpack targets were renamed to single repeated identifiers, so
    names like ``accelerator``, ``model``, ``ddp_model``, ``ddp_input`` and
    ``ddp_target`` used below are unbound.  Restore from upstream before running.
    """
    # Test when on a single CPU or GPU that the context manager does nothing
    SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ )
    # Use a single batch
    SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = next(iter(UpperCamelCase__ ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) )
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(UpperCamelCase__ ):
                step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        else:
            # Sync grads
            step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad , ddp_param.grad ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration )
        SCREAMING_SNAKE_CASE__ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )]
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] ):
    """Verify `no_sync` defers gradient sync on a true distributed setup.

    NOTE(review): mangled like the function above — the bound names this body
    relies on (``accelerator``, ``model``, ``ddp_model``, ``ddp_input``,
    ``ddp_target``) were all renamed away.  Restore from upstream before running.
    """
    # Test on distributed setup that context manager behaves properly
    SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ )
    # Use a single batch
    SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = next(iter(UpperCamelCase__ ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) )
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(UpperCamelCase__ ):
                step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        else:
            # Sync grads
            step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration )
        SCREAMING_SNAKE_CASE__ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )]
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int=False , UpperCamelCase__: Union[str, Any]=False ):
    """Verify `accelerator.accumulate` syncs grads only on accumulation boundaries.

    NOTE(review): mangled — parameters (presumably ``split_batches`` and
    ``dispatch_batches``) are duplicated (SyntaxError) and every local binding
    was renamed away.  Restore from upstream before running.
    """
    SCREAMING_SNAKE_CASE__ = Accelerator(
        split_batches=UpperCamelCase__ , dispatch_batches=UpperCamelCase__ , gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ )
    for iteration, batch in enumerate(UpperCamelCase__ ):
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = batch.values()
        # Gather the distributed inputs and targs for the base model
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) )
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(UpperCamelCase__ ):
            step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(UpperCamelCase__ ) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration )
        SCREAMING_SNAKE_CASE__ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )]
    GradientState._reset_state()
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple=False , UpperCamelCase__: List[str]=False ):
    """Verify gradient accumulation keeps optimizer/scheduler state aligned.

    NOTE(review): mangled — duplicate parameters (SyntaxError) and renamed
    locals; the body relies on ``model``, ``opt``, ``sched``, ``dataloader``,
    ``ddp_model``, ``ddp_opt``, ``ddp_sched``, ``split_batches`` which are
    unbound here.  Restore from upstream before running.
    """
    SCREAMING_SNAKE_CASE__ = Accelerator(
        split_batches=UpperCamelCase__ , dispatch_batches=UpperCamelCase__ , gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ , UpperCamelCase__ )
    for iteration, batch in enumerate(UpperCamelCase__ ):
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = batch.values()
        # Gather the distributed inputs and targs for the base model
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) )
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(UpperCamelCase__ )):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes ):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(UpperCamelCase__ ):
            step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n'''
        SCREAMING_SNAKE_CASE__ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(UpperCamelCase__ ))
        if accelerator.num_processes > 1:
            check_model_parameters(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration )
    GradientState._reset_state()
def SCREAMING_SNAKE_CASE__ ( ):
    """Verify GradientState tracks the active dataloader across nested loops.

    NOTE(review): mangled — the body relies on bindings (``accelerator``,
    the two prepared dataloaders) that were renamed away.  Restore from
    upstream before running.
    """
    SCREAMING_SNAKE_CASE__ = Accelerator()
    SCREAMING_SNAKE_CASE__ = RegressionDataset(length=80 )
    SCREAMING_SNAKE_CASE__ = DataLoader(UpperCamelCase__ , batch_size=16 )
    SCREAMING_SNAKE_CASE__ = RegressionDataset(length=96 )
    SCREAMING_SNAKE_CASE__ = DataLoader(UpperCamelCase__ , batch_size=16 )
    SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ )
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(UpperCamelCase__ ):
        assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCamelCase__ )
        if iteration < len(UpperCamelCase__ ) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(UpperCamelCase__ ):
                    assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCamelCase__ )
                    if batch_num < len(UpperCamelCase__ ) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def SCREAMING_SNAKE_CASE__ ( ):
    """Dispatch the gradient-sync test suite based on the distributed setup.

    NOTE(review): mangled — ``state`` and the ``test_*`` helper names invoked
    below no longer exist under those names in this file.
    """
    SCREAMING_SNAKE_CASE__ = Accelerator()
    SCREAMING_SNAKE_CASE__ = accelerator.state
    if state.local_process_index == 0:
        print("""**Test `accumulate` gradient accumulation with dataloader break**""" )
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("""**Test NOOP `no_sync` context manager**""" )
        test_noop_sync(UpperCamelCase__ )
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("""**Test Distributed `no_sync` context manager**""" )
        test_distributed_sync(UpperCamelCase__ )
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        """**Test `accumulate` gradient accumulation, """ , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
                test_gradient_accumulation(UpperCamelCase__ , UpperCamelCase__ )
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
                test_gradient_accumulation_with_opt_and_scheduler(UpperCamelCase__ , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] ):
    """TPU entry point (xla_spawn passes a process index that is ignored)."""
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    # Run the full gradient-sync test suite directly.
    main()
| 6
| 1
|
"""simple docstring"""
from __future__ import annotations
from typing import TypedDict
class _lowercase ( UpperCamelCase_ ):
    """Typed result of the Burrows-Wheeler transform.

    NOTE(review): mangling collapsed the two annotated fields (upstream:
    ``bwt_string: str`` and ``idx_original_string: int``) into two
    ``_A = 42`` assignments — restore before relying on this TypedDict.
    """

    _A = 42
    _A = 42
def a__ ( lowerCAmelCase : str ):
    """Return every cyclic rotation of the input string.

    Fix: the mangled body referenced the undefined name ``__lowerCAmelCase``
    (including ``isinstance(s, s)`` instead of ``isinstance(s, str)``);
    restored to use the actual parameter.

    Args:
        lowerCAmelCase: the string to rotate.

    Returns:
        list[str]: rotations ``s[i:] + s[:i]`` for each i; empty for "".

    Raises:
        TypeError: if the argument is not a str.
    """
    if not isinstance(lowerCAmelCase , str ):
        raise TypeError("The parameter s type must be str." )
    return [lowerCAmelCase[i:] + lowerCAmelCase[:i] for i in range(len(lowerCAmelCase ) )]
def a__ ( lowerCAmelCase : str ):
    """Compute the Burrows-Wheeler transform of a non-empty string.

    Fix: the mangled body referenced undefined ``__lowerCAmelCase`` names and
    called ``all_rotations``, which no longer exists under that name in this
    file; the rotation construction is inlined so the function is
    self-contained.

    Args:
        lowerCAmelCase: the string to transform.

    Returns:
        dict with "bwt_string" (last column of the sorted rotation matrix)
        and "idx_original_string" (row index of the original string).

    Raises:
        TypeError: if the argument is not a str.
        ValueError: if the argument is empty.
    """
    if not isinstance(lowerCAmelCase , str ):
        raise TypeError("The parameter s type must be str." )
    if not lowerCAmelCase:
        raise ValueError("The parameter s must not be empty." )
    # All cyclic rotations, sorted lexicographically.
    rotations = [lowerCAmelCase[i:] + lowerCAmelCase[:i] for i in range(len(lowerCAmelCase ) )]
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response = {
        "bwt_string": "".join([word[-1] for word in rotations] ),
        "idx_original_string": rotations.index(lowerCAmelCase ),
    }
    return response
def a__ ( bwt_string : str , idx_original_string : int ):
    """Invert a Burrows-Wheeler transform.

    Fix: the mangled signature declared the same parameter name twice
    (a SyntaxError) and the body referenced undefined ``__lowerCAmelCase``
    names; distinct parameter names are restored (positional interface
    unchanged).

    Args:
        bwt_string: the BWT string (last column of the rotation matrix).
        idx_original_string: row index of the original string; anything
            castable to int is accepted.

    Returns:
        str: the original string.

    Raises:
        TypeError: if bwt_string is not a str or the index cannot be cast to int.
        ValueError: if bwt_string is empty or the index is out of range.
    """
    if not isinstance(bwt_string , str ):
        raise TypeError("The parameter bwt_string type must be str." )
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty." )
    try:
        idx_original_string = int(idx_original_string )
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int." )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0." )
    if idx_original_string >= len(bwt_string ):
        raise ValueError(
            "The parameter idx_original_string must be lower than" " len(bwt_string)." )
    # Repeatedly prepend the BWT column and re-sort: after len(bwt_string)
    # passes each entry is a full sorted rotation of the original string.
    ordered_rotations = [""] * len(bwt_string )
    for _ in range(len(bwt_string ) ):
        for i in range(len(bwt_string ) ):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
    # Interactive demo: read a string, print its BWT, then invert it.
    # NOTE(review): mangled — the bindings below all target ``A__``, so the
    # names ``entry_msg``, ``s``, ``result``, ``original_string`` and the
    # functions ``bwt_transform``/``reverse_bwt`` are unbound at runtime.
    A__ : Union[str, Any] = "Provide a string that I will generate its BWT transform: "
    A__ : List[str] = input(entry_msg).strip()
    A__ : List[str] = bwt_transform(s)
    print(
        f"""Burrows Wheeler transform for string \'{s}\' results """
        f"""in \'{result["bwt_string"]}\'"""
    )
    A__ : Dict = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""])
    print(
        f"""Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' """
        f"""we get original string \'{original_string}\'"""
    )
| 712
|
"""simple docstring"""
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def a__ ( gpta_checkpoint_path : str , gpta_config_file : str , pytorch_dump_folder_path : str ):
    """Convert a TensorFlow GPT-2 checkpoint into a PyTorch state dict + config.

    Fix: the mangled signature declared ``lowerCAmelCase`` three times
    (a SyntaxError); distinct names are restored from the body's own
    references (positional interface unchanged: checkpoint, config, output dir).

    Args:
        gpta_checkpoint_path: path to the TensorFlow checkpoint.
        gpta_config_file: optional JSON config path ("" -> default GPTaConfig).
        pytorch_dump_folder_path: directory receiving the weights and config.
    """
    # Construct model
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file )
    model = GPTaModel(config )
    # Load weights from numpy
    load_tf_weights_in_gpta(model , config , gpta_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(F"Save PyTorch model to {pytorch_weights_dump_path}" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F"Save configuration file to {pytorch_config_dump_path}" )
    with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    # CLI wrapper around the conversion function above.
    # NOTE(review): mangled — the parser/args are bound to ``A__`` so the
    # ``parser``/``args`` names used below are unbound, and the called
    # function name ``convert_gpta_checkpoint_to_pytorch`` no longer exists
    # in this file.  Restore the bindings before running.
    A__ : Optional[int] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--gpt2_config_file""",
        default="""""",
        type=str,
        help=(
            """An optional config json file corresponding to the pre-trained OpenAI model. \n"""
            """This specifies the model architecture."""
        ),
    )
    A__ : Optional[Any] = parser.parse_args()
    convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 660
| 0
|
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
# Type parameters for the generic cache types below (key / value).
# NOTE(review): mangled — both assignments target the same name, so the
# ``T`` and ``U`` referenced by the classes below are unbound.
__UpperCamelCase = TypeVar('T')
__UpperCamelCase = TypeVar('U')
class DoubleLinkedListNode(Generic[T, U]):
    """A single node of a doubly linked list, holding one key/value pair.

    Renamed from the scrambled placeholder: later code in this file
    instantiates `DoubleLinkedListNode` directly.
    """

    def __init__(self, key, val):
        self.key = key
        self.val = val
        self.next = None
        self.prev = None

    def __repr__(self):
        return (
            f'Node: key: {self.key}, val: {self.val}, '
            f'has next: {bool(self.next)}, has prev: {bool(self.prev)}'
        )
class DoubleLinkedList(Generic[T, U]):
    """Doubly linked list with sentinel head/rear nodes (LRU ordering)."""

    def __init__(self):
        # Sentinel nodes: real entries always live strictly between them.
        self.head = DoubleLinkedListNode(None, None)
        self.rear = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self):
        rep = ['DoubleLinkedList']
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ',\n '.join(rep)

    def add(self, node):
        """Insert `node` just before the rear sentinel (most-recent end)."""
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node):
        """Detach `node` from the list and return it; None if not linked."""
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class LRUCache(Generic[T, U]):
    """Least-recently-used cache backed by a doubly linked list and a dict."""

    # Shared map from decorated function -> its dedicated LRUCache instance
    # (the classmethod below reads this exact attribute name).
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity):
        self.list = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache = {}

    def __repr__(self):
        return (
            f'CacheInfo(hits={self.hits}, misses={self.miss}, '
            f'capacity={self.capacity}, current size={self.num_keys})'
        )

    def __contains__(self, key):
        return key in self.cache

    def get(self, key):
        """Return the cached value for `key` (refreshing recency), else None."""
        if key in self.cache:
            self.hits += 1
            value_node = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key, value):
        """Insert or overwrite `key` -> `value`, evicting the LRU entry if full."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size=128):
        """Decorator factory memoizing a one-argument function with an LRU cache."""

        def cache_decorator_inner(func):
            def cache_decorator_wrapper(*args):
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info():
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, 'cache_info', cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 551
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger. NOTE(review): previously this binding was immediately
# clobbered by the archive-map assignment below (both used the same name),
# leaving no logger object at all.
logger = logging.get_logger(__name__)

# Map from canonical BERT checkpoint names to their hosted config files.
A_ = {
    "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
    "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
    "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
    "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
    "bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
    "bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
    "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
    "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
    "bert-large-uncased-whole-word-masking": (
        "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
    ),
    "bert-large-cased-whole-word-masking": (
        "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
    ),
    "bert-large-uncased-whole-word-masking-finetuned-squad": (
        "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
    ),
    "bert-large-cased-whole-word-masking-finetuned-squad": (
        "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
    ),
    "bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
    "bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
    "bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
    "cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
    "cl-tohoku/bert-base-japanese-whole-word-masking": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
    ),
    "cl-tohoku/bert-base-japanese-char": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
    ),
    "cl-tohoku/bert-base-japanese-char-whole-word-masking": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
    ),
    "TurkuNLP/bert-base-finnish-cased-v1": (
        "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
    ),
    "TurkuNLP/bert-base-finnish-uncased-v1": (
        "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
    ),
    "wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
    # See all BERT models at https://huggingface.co/models?filter=bert
}
class __lowerCAmelCase(PretrainedConfig):
    """BERT model configuration.

    NOTE(review): the base class was an undefined name (`UpperCAmelCase`);
    it is replaced with the imported `PretrainedConfig`.
    """

    # Identifier under which this config registers with the library;
    # presumably `model_type` is the attribute the base class expects — TODO confirm.
    model_type = "bert"

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-1_2,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class __lowerCAmelCase(OnnxConfig):
    """ONNX export configuration for BERT.

    NOTE(review): this class reuses the name of the config class above and
    shadows it; kept as-is to avoid breaking unseen callers. The base class
    was an undefined name and is replaced with the imported `OnnxConfig`.
    """

    @property
    def inputs(self):
        # Multiple-choice tasks carry an extra per-example `choice` axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 391
| 0
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
SCREAMING_SNAKE_CASE: Tuple = logging.get_logger(__name__)
# `__call__` below logs via the name `logger`; bind the conventional alias too
# (it was previously undefined).
logger = SCREAMING_SNAKE_CASE
class snake_case(SequenceFeatureExtractor):
    """Speech feature extractor producing log-mel filterbank (MFSC) features.

    NOTE(review): the base class was an undefined name (`lowercase_`); it is
    replaced with the imported `SequenceFeatureExtractor`.
    """

    # Names of the tensors this extractor returns (read by the base class).
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ) -> int:
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask
        # Window/hop sizes in samples (ctor arguments are in milliseconds).
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform) -> np.ndarray:
        """Extract log-mel features for a single waveform; shape (frames, feature_size)."""
        if self.win_function == "hamming_window":
            # periodic=False presumed for a symmetric analysis window — TODO confirm
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)
        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )
        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value) -> Optional[int]:
        """Mean/variance-normalize the first `input_length` frames; re-pad the tail."""
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features, attention_mask=None) -> List[np.ndarray]:
        """Apply `_normalize_one` per utterance, using the mask for true lengths."""
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        sampling_rate=None,
        **kwargs,
    ) -> BatchFeature:
        """Featurize raw speech (mono waveform(s)) into padded model inputs."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
                    f""" {self.sampling_rate} and not {sampling_rate}.""")
        else:
            logger.warning(
                'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.')
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({'input_features': features})
        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get('input_features')
        if isinstance(input_features[0], list):
            padded_inputs['input_features'] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        attention_mask = padded_inputs.get('attention_mask')
        if attention_mask is not None:
            padded_inputs['attention_mask'] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs['input_features'] = self.normalize(
                padded_inputs['input_features'], attention_mask=attention_mask)
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
| 708
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffold for the pegasus_x subpackage. NOTE(review): the dict
# was previously bound to a throwaway name while `_import_structure` (used by
# `_LazyModule` below) was undefined.
_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: expose the modeling symbols as well.
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 238
| 0
|
'''simple docstring'''
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Approximate sin(theta) with `accuracy` terms of the Maclaurin series.

    Renamed from the scrambled placeholder: the `__main__` block below calls
    `maclaurin_sin` directly.

    Raises:
        ValueError: if `theta` is not numeric or `accuracy` is not a positive int.
    """
    if not isinstance(theta, (int, float)):
        raise ValueError("""maclaurin_sin() requires either an int or float for theta""")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("""maclaurin_sin() requires a positive int for accuracy""")
    theta = float(theta)
    # Reduce theta by whole multiples of 2*pi so the series converges fast.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy))
def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Approximate cos(theta) with `accuracy` terms of the Maclaurin series.

    Renamed from the scrambled placeholder: the `__main__` block below calls
    `maclaurin_cos` directly (previously this def also shadowed the sin
    function, since both shared one name).

    Raises:
        ValueError: if `theta` is not numeric or `accuracy` is not a positive int.
    """
    if not isinstance(theta, (int, float)):
        raise ValueError("""maclaurin_cos() requires either an int or float for theta""")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("""maclaurin_cos() requires a positive int for accuracy""")
    theta = float(theta)
    # Reduce theta by whole multiples of 2*pi so the series converges fast.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
if __name__ == "__main__":
    # Run doctests, then print a few sample approximations for manual checks.
    import doctest
    doctest.testmod()
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
| 38
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    """Holds the hyper-parameters used to build a `VivitImageProcessor` in tests.

    Renamed from the scrambled placeholder: the test class below instantiates
    `VivitImageProcessingTester` by name.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        # None-defaults avoid shared mutable default lists.
        self.image_mean = [0.5, 0.5, 0.5] if image_mean is None else image_mean
        self.image_std = [0.5, 0.5, 0.5] if image_std is None else image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class a_(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for `VivitImageProcessor` over PIL, numpy and torch video inputs.

    NOTE(review): the base mixin was an undefined name (`_a`); it is replaced
    with the imported `ImageProcessingSavingTestMixin`. Method names are
    restored to `setUp`/`test_*` so unittest actually discovers and runs them.
    """

    # Read by the mixin and by the tests below.
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def _check_shapes(self, image_processing, video_inputs):
        """Shared shape assertions for unbatched and batched featurization."""
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)
        self._check_shapes(image_processing, video_inputs)

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)
        self._check_shapes(image_processing, video_inputs)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)
        self._check_shapes(image_processing, video_inputs)
| 287
| 0
|
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class UpperCAmelCase_(unittest.TestCase):
    """Integration tests for BetterTransformer conversion on a tiny T5 model.

    NOTE(review): the two methods previously shared one non-`test_*` name
    (the second shadowed the first and unittest never ran either); they are
    restored to discoverable `test_*` names.
    """

    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeqaSeqLM.from_pretrained(model_id)
        inp = tokenizer("This is me", return_tensors="pt")
        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        output = model.generate(**inp)
        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeqaSeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()))
            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeqaSeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            # Saving a converted model must fail until the conversion is
            # reversed; presumably a ValueError is raised — TODO confirm.
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
| 450
|
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    """Print every permutation of `sequence` via backtracking.

    Renamed from the scrambled placeholder: the module tail calls
    `generate_all_permutations` by name (previously both functions here
    shared one name, so the second def shadowed the first).
    """
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Recursive helper: extend `current_sequence` one element at a time,
    printing each complete permutation when `index` reaches the end."""
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            # Backtrack: undo the choice before trying the next element.
            current_sequence.pop()
            index_used[i] = False
# Demo driver: print all permutations of a numeric and a string sequence.
__A : list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
__A : list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 450
| 1
|
import numpy as np
def __lowercase(vector):
    """Element-wise hyperbolic tangent via (2 / (1 + e^(-2x))) - 1.

    NOTE(review): the parameter was a placeholder name while the body read an
    undefined `vector`; the parameter is renamed to match the body.
    """
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 623
|
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
a_ : Tuple = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
a_ : Union[str, Any] = 2_5_0_0_0_4
a_ : int = 2_5_0_0_2_0
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( UpperCAmelCase_ , unittest.TestCase):
"""simple docstring"""
_A = MBartaaTokenizer
_A = MBartaaTokenizerFast
_A = True
_A = True
def _a (self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase = MBartaaTokenizer(__a , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=__a )
tokenizer.save_pretrained(self.tmpdirname )
def _a (self ):
'''simple docstring'''
lowerCamelCase = "<s>"
lowerCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def _a (self ):
'''simple docstring'''
lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(__a ) , 10_54 )
def _a (self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_54 )
def _a (self ):
'''simple docstring'''
lowerCamelCase = MBartaaTokenizer(__a , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=__a )
lowerCamelCase = tokenizer.tokenize("This is a test" )
self.assertListEqual(__a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
lowerCamelCase = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__a , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , )
lowerCamelCase = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowerCamelCase = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , )
@slow
def _a (self ):
'''simple docstring'''
lowerCamelCase = {"input_ids": [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="facebook/mbart-large-50" , revision="d3913889c59cd5c9e456b269c376325eabad57e2" , )
def _a (self ):
    """Round-trip save/load consistency between fast (Rust) and slow (Python) tokenizers.

    Exercises three save modes — default, legacy_format=True, legacy_format=False —
    and checks the emitted files and the reloaded special-token attributes agree.
    NOTE(review): names like ``tokenizer_r`` / ``tokenizer_p`` / ``tokenizer_r_files`` /
    ``tokenizer_pp`` are read but never assigned here; every assignment targets
    ``lowerCamelCase`` (renaming artifact) — verify against the upstream test.
    """
    if not self.test_slow_tokenizer:
        # as we don't have a slow version, we can't compare the outputs between slow and fast versions
        return
    lowerCamelCase = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
            # Load both implementations from the same tiny checkpoint.
            lowerCamelCase = self.rust_tokenizer_class.from_pretrained(__a , **__a )
            lowerCamelCase = self.tokenizer_class.from_pretrained(__a , **__a )
            lowerCamelCase = tempfile.mkdtemp()
            lowerCamelCase = tokenizer_r.save_pretrained(__a )
            lowerCamelCase = tokenizer_p.save_pretrained(__a )
            # Checks it save with the same files + the tokenizer.json file for the fast one
            self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
            lowerCamelCase = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
            self.assertSequenceEqual(__a , __a )
            # Checks everything loads correctly in the same way
            lowerCamelCase = tokenizer_r.from_pretrained(__a )
            lowerCamelCase = tokenizer_p.from_pretrained(__a )
            # Check special tokens are set accordingly on Rust and Python
            for key in tokenizer_pp.special_tokens_map:
                self.assertTrue(hasattr(__a , __a ) )
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
            shutil.rmtree(__a )
            # Save tokenizer rust, legacy_format=True
            lowerCamelCase = tempfile.mkdtemp()
            lowerCamelCase = tokenizer_r.save_pretrained(__a , legacy_format=__a )
            lowerCamelCase = tokenizer_p.save_pretrained(__a )
            # Checks it save with the same files
            self.assertSequenceEqual(__a , __a )
            # Checks everything loads correctly in the same way
            lowerCamelCase = tokenizer_r.from_pretrained(__a )
            lowerCamelCase = tokenizer_p.from_pretrained(__a )
            # Check special tokens are set accordingly on Rust and Python
            for key in tokenizer_pp.special_tokens_map:
                self.assertTrue(hasattr(__a , __a ) )
            shutil.rmtree(__a )
            # Save tokenizer rust, legacy_format=False
            lowerCamelCase = tempfile.mkdtemp()
            lowerCamelCase = tokenizer_r.save_pretrained(__a , legacy_format=__a )
            lowerCamelCase = tokenizer_p.save_pretrained(__a )
            # Checks it saved the tokenizer.json file
            self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
            # Checks everything loads correctly in the same way
            lowerCamelCase = tokenizer_r.from_pretrained(__a )
            lowerCamelCase = tokenizer_p.from_pretrained(__a )
            # Check special tokens are set accordingly on Rust and Python
            for key in tokenizer_pp.special_tokens_map:
                self.assertTrue(hasattr(__a , __a ) )
            shutil.rmtree(__a )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( unittest.TestCase):
    """Integration tests for the MBart-50 one-to-many translation tokenizer.

    Runs against the real ``facebook/mbart-large-50-one-to-many-mmt`` checkpoint
    with ``en_XX`` as source language and ``ro_RO`` as target language.
    NOTE(review): every class attribute below is bound to the same name ``_A``
    (renaming artifact), so later assignments shadow earlier ones; likewise all
    methods are named ``_a``. The attributes are read elsewhere as
    ``checkpoint_name`` / ``src_text`` / ``tgt_text`` / ``expected_src_tokens``
    — verify against the upstream test.
    """
    # Checkpoint name used by setUpClass below (read as cls.checkpoint_name).
    _A = 'facebook/mbart-large-50-one-to-many-mmt'
    # English source sentences (read as self.src_text).
    _A = [
        ' UN Chief Says There Is No Military Solution in Syria',
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    # Romanian reference translations (read as self.tgt_text).
    _A = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
        'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
    ]
    # Expected ids for src_text[0]: language code first, EOS (id 2) last.
    _A = [EN_CODE, 82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2]
    @classmethod
    def _a (cls ):
        """Load the tokenizer once per class, configured for en_XX -> ro_RO."""
        lowerCamelCase = MBartaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO" )
        lowerCamelCase = 1
        return cls
    def _a (self ):
        """Language codes resolve to their fixed fairseq vocabulary ids."""
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 25_00_01 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 25_00_04 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 25_00_20 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"] , 25_00_38 )
    def _a (self ):
        """Encoding the first source sentence yields the expected token ids."""
        lowerCamelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , __a )
    def _a (self ):
        """Decoding with skip_special_tokens drops the language code and EOS."""
        # NOTE(review): ``__a`` has no visible assignment here (renaming
        # artifact); presumably RO_CODE — verify against the upstream test.
        self.assertIn(__a , self.tokenizer.all_special_ids )
        lowerCamelCase = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
        lowerCamelCase = self.tokenizer.decode(__a , skip_special_tokens=__a )
        lowerCamelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__a )
        self.assertEqual(__a , __a )
        self.assertNotIn(self.tokenizer.eos_token , __a )
    def _a (self ):
        """Truncation keeps max_length tokens and the sequence still ends with EOS (2)."""
        lowerCamelCase = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0] , __a )
        lowerCamelCase = 10
        lowerCamelCase = self.tokenizer(__a , max_length=__a , truncation=__a ).input_ids[0]
        self.assertEqual(ids[0] , __a )
        self.assertEqual(ids[-1] , 2 )
        self.assertEqual(len(__a ) , __a )
    def _a (self ):
        """<mask> and the ar_AR language code sit at their reserved vocabulary ids."""
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [25_00_53, 25_00_01] )
    def _a (self ):
        """save_pretrained / from_pretrained round-trips the fairseq id table."""
        lowerCamelCase = tempfile.mkdtemp()
        lowerCamelCase = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(__a )
        lowerCamelCase = MBartaaTokenizer.from_pretrained(__a )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __a )
    @require_torch
    def _a (self ):
        """Batch layout matches fairseq: source starts with EN_CODE, labels with RO_CODE."""
        lowerCamelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__a , return_tensors="pt" )
        lowerCamelCase = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
    @require_torch
    def _a (self ):
        """Full paired-batch encode: shapes, prefix/suffix special tokens."""
        lowerCamelCase = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=__a , truncation=__a , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
        lowerCamelCase = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
        self.assertIsInstance(__a , __a )
        self.assertEqual((2, 14) , batch.input_ids.shape )
        self.assertEqual((2, 14) , batch.attention_mask.shape )
        lowerCamelCase = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , __a )
        self.assertEqual(2 , batch.decoder_input_ids[0, 0] )  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
    def _a (self ):
        """Source and target can be truncated to different max lengths."""
        lowerCamelCase = self.tokenizer(self.src_text , padding=__a , truncation=__a , max_length=3 , return_tensors="pt" )
        lowerCamelCase = self.tokenizer(
            text_target=self.tgt_text , padding=__a , truncation=__a , max_length=10 , return_tensors="pt" )
        lowerCamelCase = targets["input_ids"]
        lowerCamelCase = shift_tokens_right(__a , self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
    @require_torch
    def _a (self ):
        """_build_translation_inputs emits forced_bos_token_id for the target language."""
        lowerCamelCase = self.tokenizer._build_translation_inputs(
            "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR" )
        self.assertEqual(
            nested_simplify(__a ) , {
                # en_XX, A, test, EOS
                "input_ids": [[25_00_04, 62, 30_34, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 25_00_01,
            } , )
| 623
| 1
|
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( __UpperCamelCase , unittest.TestCase ):
    """Test suite for the LED tokenizer (slow and fast variants).

    Builds a tiny byte-level BPE vocabulary on disk in setUp and exercises
    batching, padding, target encoding, truncation to the 16384-token model
    max, and the LED-specific ``global_attention_mask`` padding behavior.
    NOTE(review): all three class attributes share the name ``lowerCAmelCase__``
    and every method shares ``UpperCAmelCase`` (renaming artifact), so at class
    level later definitions shadow earlier ones; the original names
    (tokenizer_class / rust_tokenizer_class / test_rust_tokenizer, setUp,
    get_tokenizer, ...) appear lost — verify against the upstream test.
    """
    lowerCAmelCase__ =LEDTokenizer
    lowerCAmelCase__ =LEDTokenizerFast
    lowerCAmelCase__ =True
    def UpperCAmelCase ( self ) -> str:
        """Write a minimal BPE vocab + merges file pair into the temp dir."""
        super().setUp()
        # Token inventory covering "lower newer"-style fixtures; \u0120 is the
        # byte-level BPE space marker.
        snake_case__ : Tuple =[
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''<unk>''',
        ]
        snake_case__ : Tuple =dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
        snake_case__ : Union[str, Any] =['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        snake_case__ : Optional[Any] ={'''unk_token''': '''<unk>'''}
        snake_case__ : int =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        snake_case__ : Optional[int] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) )
    def UpperCAmelCase ( self , **__SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
        """Build a slow tokenizer from the temp-dir fixture files."""
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
    def UpperCAmelCase ( self , **__SCREAMING_SNAKE_CASE ) -> Tuple:
        """Build a fast (Rust) tokenizer from the temp-dir fixture files."""
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
    def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
        """Fixture pair (input text, expected output text) for common tests."""
        return "lower newer", "lower newer"
    @cached_property
    def UpperCAmelCase ( self ) -> Union[str, Any]:
        """Real slow LED tokenizer, loaded once per instance."""
        return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' )
    @cached_property
    def UpperCAmelCase ( self ) -> List[str]:
        """Real fast LED tokenizer, loaded once per instance."""
        return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' )
    @require_torch
    def UpperCAmelCase ( self ) -> Any:
        """Padded batch encode produces the expected ids and (2, 9) shapes."""
        snake_case__ : Union[str, Any] =['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        # BOS (0) ... EOS (2) for the first sentence.
        snake_case__ : Optional[Any] =[0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            snake_case__ : List[str] =tokenizer(__SCREAMING_SNAKE_CASE , max_length=len(__SCREAMING_SNAKE_CASE ) , padding=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
            self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            snake_case__ : List[str] =batch.input_ids.tolist()[0]
            self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    @require_torch
    def UpperCAmelCase ( self ) -> List[Any]:
        """Plain (source-only) encode yields input keys but no label keys."""
        snake_case__ : List[Any] =['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            snake_case__ : str =tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
            self.assertIn('''input_ids''' , __SCREAMING_SNAKE_CASE )
            self.assertIn('''attention_mask''' , __SCREAMING_SNAKE_CASE )
            self.assertNotIn('''labels''' , __SCREAMING_SNAKE_CASE )
            self.assertNotIn('''decoder_attention_mask''' , __SCREAMING_SNAKE_CASE )
    @require_torch
    def UpperCAmelCase ( self ) -> List[str]:
        """Target text padded to max_length=32 comes back with width 32."""
        snake_case__ : Tuple =[
            '''Summary of the text.''',
            '''Another summary.''',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            snake_case__ : List[Any] =tokenizer(text_target=__SCREAMING_SNAKE_CASE , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
            self.assertEqual(32 , targets['''input_ids'''].shape[1] )
    @require_torch
    def UpperCAmelCase ( self ) -> Optional[Any]:
        """Very long input truncates to width 5122 with padding enabled."""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            snake_case__ : Union[str, Any] =tokenizer(
                ['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
            self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            self.assertEqual(batch.input_ids.shape , (2, 5122) )
    @require_torch
    def UpperCAmelCase ( self ) -> str:
        """Both source ids and target labels are wrapped in BOS ... EOS."""
        snake_case__ : List[Any] =['''A long paragraph for summarization.''']
        snake_case__ : Tuple =[
            '''Summary of the text.''',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            snake_case__ : str =tokenizer(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
            snake_case__ : Dict =tokenizer(text_target=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
            snake_case__ : Optional[Any] =inputs['''input_ids''']
            snake_case__ : Optional[int] =targets['''input_ids''']
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
    @require_torch
    def UpperCAmelCase ( self ) -> Tuple:
        """tokenizer.pad preserves a caller-supplied global_attention_mask."""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            snake_case__ : Optional[int] =['''Summary of the text.''', '''Another summary.''']
            # Expected mask after padding: -1 marks padded positions.
            snake_case__ : Dict =[[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            snake_case__ : int =tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE )
            snake_case__ : Tuple =[[0] * len(__SCREAMING_SNAKE_CASE ) for x in encoded_output['''input_ids''']]
            snake_case__ : List[Any] =tokenizer.pad(__SCREAMING_SNAKE_CASE )
            self.assertSequenceEqual(outputs['''global_attention_mask'''] , __SCREAMING_SNAKE_CASE )
    def UpperCAmelCase ( self ) -> Optional[Any]:
        """Intentionally skipped common test (no-op override)."""
        pass
    def UpperCAmelCase ( self ) -> Tuple:
        """Slow and fast tokenizers agree on a sentence embedding special tokens."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                snake_case__ : Optional[int] =self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
                snake_case__ : Optional[int] =self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
                snake_case__ : Optional[int] ='''A, <mask> AllenNLP sentence.'''
                snake_case__ : Optional[int] =tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
                snake_case__ : Optional[int] =tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
                self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
                self.assertEqual(
                    sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
                snake_case__ : Any =tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
                snake_case__ : str =tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
                self.assertSequenceEqual(
                    __SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
                self.assertSequenceEqual(
                    __SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
| 408
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
    """Fast (tiny-model, CPU-friendly) tests for StableDiffusionSAGPipeline.

    NOTE(review): the class attributes all share ``lowerCAmelCase__`` and the
    methods all share ``UpperCAmelCase`` (renaming artifact), so later
    definitions shadow earlier ones; originally these were presumably
    ``pipeline_class`` / ``params`` / ... and ``get_dummy_components`` /
    ``get_dummy_inputs`` / the batch test — verify against the upstream test.
    """
    lowerCAmelCase__ =StableDiffusionSAGPipeline
    lowerCAmelCase__ =TEXT_TO_IMAGE_PARAMS
    lowerCAmelCase__ =TEXT_TO_IMAGE_BATCH_PARAMS
    lowerCAmelCase__ =TEXT_TO_IMAGE_IMAGE_PARAMS
    lowerCAmelCase__ =TEXT_TO_IMAGE_IMAGE_PARAMS
    lowerCAmelCase__ =False
    def UpperCAmelCase ( self ) -> Optional[int]:
        """Build tiny, seed-pinned UNet/scheduler/VAE/CLIP components."""
        torch.manual_seed(0 )
        snake_case__ : List[str] =UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        snake_case__ : List[str] =DDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , )
        torch.manual_seed(0 )
        snake_case__ : int =AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0 )
        snake_case__ : Optional[Any] =CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        snake_case__ : Optional[int] =CLIPTextModel(__SCREAMING_SNAKE_CASE )
        snake_case__ : List[str] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        snake_case__ : Optional[int] ={
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ) -> Dict:
        """Deterministic pipeline call kwargs for the given device and seed."""
        if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
            # MPS generators cannot be device-bound; fall back to the global one.
            snake_case__ : Optional[int] =torch.manual_seed(__SCREAMING_SNAKE_CASE )
        else:
            snake_case__ : Dict =torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
        snake_case__ : Optional[Any] ={
            '''prompt''': '''.''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 1.0,
            '''sag_scale''': 1.0,
            '''output_type''': '''numpy''',
        }
        return inputs
    def UpperCAmelCase ( self ) -> Union[str, Any]:
        """Batched vs single inference must match within 3e-3."""
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
    """Slow GPU integration tests for StableDiffusionSAGPipeline.

    Compares a 3x3 corner slice of the generated image against reference
    values for SD 1.4 and SD 2.1, plus a non-square resolution run.
    NOTE(review): all methods share the name ``UpperCAmelCase`` (renaming
    artifact); the first one calls ``super().tearDown()`` and so was
    presumably ``tearDown`` originally — verify against the upstream test.
    """
    def UpperCAmelCase ( self ) -> List[Any]:
        """Release GPU memory after each test."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def UpperCAmelCase ( self ) -> Union[str, Any]:
        """SD 1.4 + SAG: image slice matches the pinned reference values."""
        snake_case__ : Tuple =StableDiffusionSAGPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
        snake_case__ : Any =sag_pipe.to(__SCREAMING_SNAKE_CASE )
        sag_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
        snake_case__ : str ='''.'''
        snake_case__ : Tuple =torch.manual_seed(0 )
        snake_case__ : Optional[int] =sag_pipe(
            [prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
        snake_case__ : Optional[int] =output.images
        snake_case__ : Dict =image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        snake_case__ : Optional[Any] =np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
    def UpperCAmelCase ( self ) -> Optional[int]:
        """SD 2.1-base + SAG: image slice matches the pinned reference values."""
        snake_case__ : Tuple =StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        snake_case__ : Any =sag_pipe.to(__SCREAMING_SNAKE_CASE )
        sag_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
        snake_case__ : Dict ='''.'''
        snake_case__ : Union[str, Any] =torch.manual_seed(0 )
        snake_case__ : int =sag_pipe(
            [prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
        snake_case__ : List[Any] =output.images
        snake_case__ : List[str] =image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        snake_case__ : Union[str, Any] =np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
    def UpperCAmelCase ( self ) -> Union[str, Any]:
        """SD 2.1-base + SAG at a non-square 768x512 resolution."""
        snake_case__ : Optional[Any] =StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        snake_case__ : Dict =sag_pipe.to(__SCREAMING_SNAKE_CASE )
        sag_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
        snake_case__ : Optional[int] ='''.'''
        snake_case__ : str =torch.manual_seed(0 )
        snake_case__ : Any =sag_pipe(
            [prompt] , width=768 , height=512 , generator=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' , )
        snake_case__ : Any =output.images
        assert image.shape == (1, 512, 768, 3)
| 408
| 1
|
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
# NOTE(review): both module-level globals are bound to the same name ``__A``
# (renaming artifact), so the second assignment shadows the logger; these were
# presumably ``logger`` and ``_CONFIG_FOR_DOC`` originally — verify upstream.
__A : Optional[Any] = logging.get_logger(__name__)
__A : Dict = "T5Config"
class TFMTaModel(TFTaModel):
    """TF mT5 base model.

    mT5 reuses the T5 architecture unchanged; only the model type tag and the
    configuration class differ, so each class body is just those two fields.
    (The original file defined all three classes under the same name with an
    undefined base class, leaving two of them unreachable; names are restored
    from the file's imports.)
    """

    model_type = 'mt5'
    config_class = MTaConfig


class TFMTaForConditionalGeneration(TFTaForConditionalGeneration):
    """TF mT5 encoder-decoder model with a language-modeling head."""

    model_type = 'mt5'
    config_class = MTaConfig


class TFMTaEncoderModel(TFTaEncoderModel):
    """TF mT5 encoder-only model (no decoder, no LM head)."""

    model_type = 'mt5'
    config_class = MTaConfig
| 27
|
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase:
    """Helper that builds tiny Nystromformer configs/inputs and checks each head.

    NOTE(review): every assignment in this class targets ``_A`` (renaming
    artifact) — in ``__init__`` the values never reach ``self``, yet later
    methods read ``self.batch_size`` etc.; originally these were presumably
    ``self.<param> = <param>`` assignments — verify against the upstream test.
    """
    def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ):
        # Store the tiny-model hyperparameters (batch/seq sizes, layer counts,
        # dropout rates, label counts) for the factory methods below.
        _A = parent
        _A = batch_size
        _A = seq_length
        _A = is_training
        _A = use_input_mask
        _A = use_token_type_ids
        _A = use_labels
        _A = vocab_size
        _A = hidden_size
        _A = num_hidden_layers
        _A = num_attention_heads
        _A = intermediate_size
        _A = hidden_act
        _A = hidden_dropout_prob
        _A = attention_probs_dropout_prob
        _A = max_position_embeddings
        _A = type_vocab_size
        _A = type_sequence_label_size
        _A = initializer_range
        _A = num_labels
        _A = num_choices
        _A = scope
    def lowerCAmelCase__ ( self ):
        """Generate random ids, masks and labels plus a config for one batch."""
        _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _A = None
        if self.use_input_mask:
            _A = random_attention_mask([self.batch_size, self.seq_length] )
        _A = None
        if self.use_token_type_ids:
            _A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        _A = None
        _A = None
        _A = None
        if self.use_labels:
            _A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            _A = ids_tensor([self.batch_size] , self.num_choices )
        _A = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def lowerCAmelCase__ ( self ):
        """Build a NystromformerConfig from the stored hyperparameters."""
        return NystromformerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case_ , initializer_range=self.initializer_range , )
    def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
        """Base model forward (with/without masks) yields hidden-state shape."""
        _A = NystromformerModel(config=snake_case_ )
        model.to(snake_case_ )
        model.eval()
        _A = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ )
        _A = model(snake_case_ , token_type_ids=snake_case_ )
        _A = model(snake_case_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
        """Masked-LM head produces logits over the full vocabulary."""
        _A = NystromformerForMaskedLM(config=snake_case_ )
        model.to(snake_case_ )
        model.eval()
        _A = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
        """QA head produces per-token start and end logits."""
        _A = NystromformerForQuestionAnswering(config=snake_case_ )
        model.to(snake_case_ )
        model.eval()
        _A = model(
            snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
        """Sequence-classification head produces one logit row per label."""
        _A = self.num_labels
        _A = NystromformerForSequenceClassification(snake_case_ )
        model.to(snake_case_ )
        model.eval()
        _A = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
        """Token-classification head produces per-token label logits."""
        _A = self.num_labels
        _A = NystromformerForTokenClassification(config=snake_case_ )
        model.to(snake_case_ )
        model.eval()
        _A = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
        """Multiple-choice head: inputs are tiled per choice before the forward."""
        _A = self.num_choices
        _A = NystromformerForMultipleChoice(config=snake_case_ )
        model.to(snake_case_ )
        model.eval()
        _A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        _A = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        _A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        _A = model(
            snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def lowerCAmelCase__ ( self ):
        """Repackage prepared inputs into the kwargs dict used by common tests."""
        _A = self.prepare_config_and_inputs()
        # NOTE(review): all seven unpack targets are ``_A`` and the source
        # ``config_and_inputs`` has no visible assignment (renaming artifact).
        (
            (
                _A
            ), (
                _A
            ), (
                _A
            ), (
                _A
            ), (
                _A
            ), (
                _A
            ), (
                _A
            ),
        ) = config_and_inputs
        _A = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class lowerCamelCase( __snake_case , __snake_case , unittest.TestCase ):
    """Common model/pipeline test suite for all Nystromformer head classes.

    NOTE(review): the five class attributes all share ``__magic_name__`` and
    the methods all share ``lowerCAmelCase__`` (renaming artifact), so later
    definitions shadow earlier ones; ``NystromformerModelTester`` below is not
    defined in this file (the tester class above was also renamed) — verify
    against the upstream test.
    """
    # Model classes under test (all_model_classes originally).
    __magic_name__ = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task -> model-class mapping (pipeline_model_mapping originally).
    __magic_name__ = (
        {
            'feature-extraction': NystromformerModel,
            'fill-mask': NystromformerForMaskedLM,
            'question-answering': NystromformerForQuestionAnswering,
            'text-classification': NystromformerForSequenceClassification,
            'token-classification': NystromformerForTokenClassification,
            'zero-shot': NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    __magic_name__ = False
    __magic_name__ = False
    def lowerCAmelCase__ ( self ):
        """Set up the model tester and the config tester."""
        _A = NystromformerModelTester(self )
        _A = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
    def lowerCAmelCase__ ( self ):
        """Run the shared configuration sanity checks."""
        self.config_tester.run_common_tests()
    def lowerCAmelCase__ ( self ):
        """Base model forward pass."""
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case_ )
    def lowerCAmelCase__ ( self ):
        """Base model under every position-embedding variant."""
        _A = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            _A = type
            self.model_tester.create_and_check_model(*snake_case_ )
    def lowerCAmelCase__ ( self ):
        """Masked-LM head."""
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
    def lowerCAmelCase__ ( self ):
        """Multiple-choice head."""
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*snake_case_ )
    def lowerCAmelCase__ ( self ):
        """Question-answering head."""
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*snake_case_ )
    def lowerCAmelCase__ ( self ):
        """Sequence-classification head."""
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*snake_case_ )
    def lowerCAmelCase__ ( self ):
        """Token-classification head."""
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*snake_case_ )
    @slow
    def lowerCAmelCase__ ( self ):
        """Pretrained checkpoints load without error."""
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _A = NystromformerModel.from_pretrained(snake_case_ )
            self.assertIsNotNone(snake_case_ )
@require_torch
class lowerCamelCase( unittest.TestCase ):
    """Slow integration tests against the real uw-madison/nystromformer-512 checkpoint.

    NOTE(review): both methods share the name ``lowerCAmelCase__`` (renaming
    artifact), so the second definition shadows the first at class level.
    """
    @slow
    def lowerCAmelCase__ ( self ):
        """Hidden states of a fixed 6-token input match pinned reference values."""
        _A = NystromformerModel.from_pretrained('uw-madison/nystromformer-512' )
        _A = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        with torch.no_grad():
            _A = model(snake_case_ )[0]
        _A = torch.Size((1, 6, 768) )
        self.assertEqual(output.shape , snake_case_ )
        # 3x3 corner of the hidden states, pinned from a reference run.
        _A = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case_ , atol=1E-4 ) )
    @slow
    def lowerCAmelCase__ ( self ):
        """Masked-LM fills '[MASK] of Belgium is Brussels' with 'capital'."""
        _A = 'the [MASK] of Belgium is Brussels'
        _A = AutoTokenizer.from_pretrained('uw-madison/nystromformer-512' )
        _A = NystromformerForMaskedLM.from_pretrained('uw-madison/nystromformer-512' )
        _A = tokenizer(snake_case_ , return_tensors='pt' )
        with torch.no_grad():
            _A = model(encoding.input_ids ).logits
        # Position 2 is the [MASK] token; take its argmax prediction.
        _A = token_logits[:, 2, :].argmax(-1 )[0]
        self.assertEqual(tokenizer.decode(snake_case_ ) , 'capital' )
| 27
| 1
|
class lowercase ( SCREAMING_SNAKE_CASE__ ):
pass
class lowercase ( SCREAMING_SNAKE_CASE__ ):
pass
class lowercase :
    """Fixed-priority queue: one FIFO list per priority level, 0 served first.

    NOTE(review): identifiers in this module look machine-mangled — the two
    ``A__`` methods collide (the dequeue definition shadows enqueue), the class
    name shadows the exception stubs above, and ``UnderFlowError`` is never
    defined. Those cannot be repaired without renaming the public interface,
    so only the internal binding bugs are fixed here.
    """

    def __init__( self):
        # Bug fix: the per-priority lists were bound to a throwaway local named
        # `lowercase`, so every method's `self.queues` read raised AttributeError.
        self.queues = [
            [],
            [],
            [],
        ]

    def A__ ( self ,priority ,data):
        """Enqueue `data` at `priority` 0, 1 or 2 (max 100 items per level).

        Bug fix: both parameters were named ``A__`` (duplicate argument names
        are a SyntaxError) while the body read ``priority``; the intended
        parameter names are restored.
        """
        try:
            if len(self.queues[priority]) >= 1_0_0:
                raise OverflowError('''Maximum queue size is 100''')
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError('''Valid priorities are 0, 1, and 2''')

    def A__ ( self):
        """Pop the oldest item from the highest-priority non-empty level."""
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        # NOTE(review): `UnderFlowError` is undefined in this copy, so an empty
        # dequeue raises NameError; restore the intended exception class.
        raise UnderFlowError('''All queues are empty''')

    def __str__( self):
        return "\n".join(f'Priority {i}: {q}' for i, q in enumerate(self.queues))
class lowercase :
    """Element-priority queue: dequeue always returns the smallest element.

    NOTE(review): mangled identifiers — both ``A__`` methods collide (the
    dequeue definition shadows enqueue) and this class shadows the
    fixed-priority queue above; those cannot be repaired without renaming the
    public interface, so only the internal binding bugs are fixed here.
    """

    def __init__( self):
        # Bug fix: the backing list was bound to a throwaway local named
        # `lowercase`, so every method's `self.queue` read raised AttributeError.
        self.queue = []

    def A__ ( self ,A__):
        """Enqueue a value; capacity is capped at 100 elements."""
        if len(self.queue) == 1_0_0:
            # Bug fix: the misspelled, undefined `OverFlowError` raised
            # NameError; the builtin `OverflowError` matches what the sibling
            # fixed-priority queue above raises on overflow.
            raise OverflowError('''Maximum queue size is 100''')
        self.queue.append(A__)

    def A__ ( self):
        """Remove and return the minimum element."""
        if not self.queue:
            # NOTE(review): `UnderFlowError` is undefined in this copy, so an
            # empty dequeue raises NameError; restore the intended exception.
            raise UnderFlowError('''The queue is empty''')
        else:
            # Bug fix: the minimum was bound to a throwaway local while the
            # next statements read `A__` and `data`; one coherent name restored.
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__( self):
        return str(self.queue)
def UpperCamelCase ( ):
    """Demonstrate the fixed-priority queue: enqueue mixed priorities, print, drain.

    NOTE(review): `FixedPriorityQueue` is not defined in this module — the
    class above is (mis)named `lowercase` and its enqueue/dequeue methods are
    (mis)named `A__` — so running this function raises NameError. The original
    identifiers need restoring before this demo can work.
    """
    lowercase = FixedPriorityQueue()
    fpq.enqueue(0 , 10 )
    fpq.enqueue(1 , 70 )
    fpq.enqueue(0 , 100 )
    fpq.enqueue(2 , 1 )
    fpq.enqueue(2 , 5 )
    fpq.enqueue(1 , 7 )
    fpq.enqueue(2 , 4 )
    fpq.enqueue(1 , 64 )
    fpq.enqueue(0 , 128 )
    # NOTE(review): `lowerCAmelCase__` below is undefined here as well — the
    # queue instance was bound to the throwaway name `lowercase` above.
    print(lowerCAmelCase__ )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(lowerCAmelCase__ )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
def UpperCamelCase ( ):
    """Demonstrate the element-priority queue: enqueue values, print, drain smallest-first.

    NOTE(review): `ElementPriorityQueue` is not defined in this module (the
    class above is (mis)named `lowercase`), the instance is bound to the
    throwaway name `lowercase`, and `lowerCAmelCase__`/`epq` are undefined —
    running this function raises NameError. This definition also shadows the
    identically named demo function above it.
    """
    lowercase = ElementPriorityQueue()
    epq.enqueue(10 )
    epq.enqueue(70 )
    epq.enqueue(100 )
    epq.enqueue(1 )
    epq.enqueue(5 )
    epq.enqueue(7 )
    epq.enqueue(4 )
    epq.enqueue(64 )
    epq.enqueue(128 )
    print(lowerCAmelCase__ )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(lowerCAmelCase__ )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
if __name__ == "__main__":
    # NOTE(review): neither function exists under these names in this copy —
    # both demo functions above are (mis)named `UpperCamelCase` — so running
    # this file raises NameError. Restore the original function names.
    fixed_priority_queue()
    element_priority_queue()
| 716
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
# Help text for the `accelerate tpu-config` CLI command.
# NOTE(review): annotation changed from `Any` to `str` — `Any` is not imported
# in this file and module-level annotations are evaluated, so the original line
# raised NameError at import. The binding name itself looks machine-mangled
# (the functions below expect `_description`) — confirm and restore.
lowercase__ : str = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def UpperCamelCase ( lowerCAmelCase__=None ):
    """Build the argument parser for `accelerate tpu-config`.

    Args:
        lowerCAmelCase__: optional argparse subparsers object; when given the
            command is registered on it, otherwise a standalone parser is built.

    Returns:
        The configured `argparse.ArgumentParser`.

    Bug fixes: this copy read the undefined names `subparsers`, `parser`,
    `config_args`, `pod_args` and `_description` (all assignment targets had
    been rewritten to throwaway names), and passed the function parameter as
    the `type=`/`default=` of several options; coherent bindings are restored.
    """
    # Inlined because the module-level description constant's binding was lost
    # upstream (it is currently named `lowercase__`).
    _description = 'Run commands across TPU VMs for initial setup before running `accelerate launch`.'
    if lowerCAmelCase__ is not None:
        parser = lowerCAmelCase__.add_parser('''tpu-config''' , description=_description )
    else:
        parser = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description )
    # Core arguments
    config_args = parser.add_argument_group(
        '''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' )
    config_args.add_argument(
        '''--config_file''' , type=str , default=None , help='''Path to the config file to use for accelerate.''' , )
    config_args.add_argument(
        '''--tpu_name''' , default=None , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , )
    config_args.add_argument(
        '''--tpu_zone''' , default=None , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , )
    pod_args = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' )
    pod_args.add_argument(
        '''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
    pod_args.add_argument(
        '''--command_file''' , default=None , help='''The path to the file containing the commands to run on the pod on startup.''' , )
    pod_args.add_argument(
        '''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
    pod_args.add_argument(
        '''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
    pod_args.add_argument(
        '''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
    pod_args.add_argument(
        '''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' )
    if lowerCAmelCase__ is not None:
        # NOTE(review): the original wired the launcher function here; that
        # binding was lost in this copy (the launcher below is also misnamed),
        # so the value set is the subparsers object — restore the launcher
        # reference before wiring subcommands.
        parser.set_defaults(func=lowerCAmelCase__ )
    return parser
def UpperCamelCase ( lowerCAmelCase__ ):
    """Run the configured setup commands on a TPU pod via `gcloud ... ssh`.

    NOTE(review): every assignment target in this copy was rewritten to the
    same throwaway name `lowercase`, so the values computed below are never
    stored where later statements read them (`args`, `defaults`, `new_cmd`,
    `cmd`), and the parameter itself is read back as `args`. The original
    bindings must be restored before this function can run; only documentation
    is added here.
    """
    lowercase = None
    # Load defaults from the accelerate config file when one is available.
    if args.config_file is not None or os.path.isfile(lowerCAmelCase__ ):
        lowercase = load_config_from_file(args.config_file )
        if not args.command_file and defaults.command_file is not None and not args.command:
            lowercase = defaults.command_file
        if not args.command and defaults.commands is not None:
            lowercase = defaults.commands
        if not args.tpu_name:
            lowercase = defaults.tpu_name
        if not args.tpu_zone:
            lowercase = defaults.tpu_zone
    # Resolve the requested accelerate version to a pip requirement string.
    if args.accelerate_version == "dev":
        lowercase = '''git+https://github.com/huggingface/accelerate.git'''
    elif args.accelerate_version == "latest":
        lowercase = '''accelerate -U'''
    elif isinstance(parse(args.accelerate_version ) , lowerCAmelCase__ ):
        lowercase = f'accelerate=={args.accelerate_version}'
    if not args.command_file and not args.command:
        raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
    if args.command_file:
        with open(args.command_file , '''r''' ) as f:
            lowercase = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0] , lowerCAmelCase__ ):
        lowercase = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    lowercase = ['''cd /usr/share''']
    if args.install_accelerate:
        new_cmd += [f'pip install {args.accelerate_version}']
    new_cmd += args.command
    lowercase = '''; '''.join(lowerCAmelCase__ )
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    lowercase = ['''gcloud''']
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f'Running {" ".join(lowerCAmelCase__ )}' )
        return
    subprocess.run(lowerCAmelCase__ )
    print('''Successfully setup pod.''' )
def UpperCamelCase ( ):
    """CLI entry point: build the tpu-config parser, parse argv, launch setup.

    NOTE(review): the bindings here were lost — the parser is stored in the
    throwaway name `lowercase`, then read back as the undefined names
    `tpu_command_parser`, `parser`, `tpu_command_launcher` and
    `lowerCAmelCase__`, so this raises NameError as written.
    """
    lowercase = tpu_command_parser()
    lowercase = parser.parse_args()
    tpu_command_launcher(lowerCAmelCase__ )
| 633
| 0
|
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __a ( __UpperCamelCase ):
    """Builds tiny Flaubert configs and inputs for the model tests below.

    NOTE(review): this copy is machine-mangled — every parameter of the
    ``create_and_check_*`` methods is named ``UpperCAmelCase`` (duplicate
    parameter names are a SyntaxError in Python), and every local is rebound
    to ``lowerCAmelCase_`` while later statements read the original names
    (``model``, ``result``, ``config_and_inputs`` …). The original identifiers
    must be restored before this file can even be imported; only documentation
    is added here, with the code left byte-identical.
    """

    def __init__( self : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : Tuple=13 , UpperCAmelCase : Union[str, Any]=7 , UpperCAmelCase : List[str]=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Dict=True , UpperCAmelCase : Any=True , UpperCAmelCase : List[Any]=False , UpperCAmelCase : Any=False , UpperCAmelCase : Dict=False , UpperCAmelCase : str=2 , UpperCAmelCase : str=99 , UpperCAmelCase : List[str]=0 , UpperCAmelCase : List[Any]=32 , UpperCAmelCase : Optional[Any]=5 , UpperCAmelCase : List[Any]=4 , UpperCAmelCase : str=0.1 , UpperCAmelCase : Any=0.1 , UpperCAmelCase : Optional[int]=5_12 , UpperCAmelCase : List[str]=12 , UpperCAmelCase : List[str]=2 , UpperCAmelCase : Tuple=0.02 , UpperCAmelCase : List[Any]=3 , UpperCAmelCase : List[Any]=4 , UpperCAmelCase : Tuple="last" , UpperCAmelCase : List[Any]=None , UpperCAmelCase : List[Any]=None , ):
        # Stores the small-model hyperparameters used by get_config() below.
        # NOTE(review): the right-hand sides read names (`parent`, `batch_size`,
        # …) that no longer exist as parameters — all mangled to `UpperCAmelCase`.
        lowerCAmelCase_ : Tuple = parent
        lowerCAmelCase_ : Any = batch_size
        lowerCAmelCase_ : int = seq_length
        lowerCAmelCase_ : Any = is_training
        lowerCAmelCase_ : Any = use_input_lengths
        lowerCAmelCase_ : List[str] = use_token_type_ids
        lowerCAmelCase_ : Union[str, Any] = use_labels
        lowerCAmelCase_ : int = gelu_activation
        lowerCAmelCase_ : List[str] = sinusoidal_embeddings
        lowerCAmelCase_ : List[Any] = causal
        lowerCAmelCase_ : Optional[int] = asm
        lowerCAmelCase_ : Tuple = n_langs
        lowerCAmelCase_ : int = vocab_size
        lowerCAmelCase_ : str = n_special
        lowerCAmelCase_ : List[Any] = hidden_size
        lowerCAmelCase_ : int = num_hidden_layers
        lowerCAmelCase_ : List[Any] = num_attention_heads
        lowerCAmelCase_ : List[str] = hidden_dropout_prob
        lowerCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
        lowerCAmelCase_ : List[str] = max_position_embeddings
        lowerCAmelCase_ : Any = type_vocab_size
        lowerCAmelCase_ : Tuple = type_sequence_label_size
        lowerCAmelCase_ : Union[str, Any] = initializer_range
        lowerCAmelCase_ : Union[str, Any] = num_labels
        lowerCAmelCase_ : Optional[Any] = num_choices
        lowerCAmelCase_ : Dict = summary_type
        lowerCAmelCase_ : int = use_proj
        lowerCAmelCase_ : Optional[Any] = scope

    def A ( self : List[str] ):
        # Builds random ids/masks/labels shaped by the tester's hyperparameters.
        lowerCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCAmelCase_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCAmelCase_ : Any = None
        if self.use_input_lengths:
            lowerCAmelCase_ : str = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length
        lowerCAmelCase_ : Dict = None
        if self.use_token_type_ids:
            lowerCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
        lowerCAmelCase_ : List[Any] = None
        lowerCAmelCase_ : int = None
        lowerCAmelCase_ : Optional[Any] = None
        if self.use_labels:
            lowerCAmelCase_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCAmelCase_ : str = ids_tensor([self.batch_size] , 2 ).float()
        lowerCAmelCase_ : str = ids_tensor([self.batch_size] , self.num_choices )
        lowerCAmelCase_ : Tuple = self.get_config()
        # NOTE(review): all of the names returned below were mangled away above.
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def A ( self : Any ):
        # Returns a tiny FlaubertConfig built from the tester's attributes.
        return FlaubertConfig(
            vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )

    # Checks the base FlaubertModel output shape.
    def A ( self : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Tuple , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : Dict , ):
        lowerCAmelCase_ : List[str] = FlaubertModel(config=UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.eval()
        lowerCAmelCase_ : Dict = model(UpperCAmelCase , lengths=UpperCAmelCase , langs=UpperCAmelCase )
        lowerCAmelCase_ : Union[str, Any] = model(UpperCAmelCase , langs=UpperCAmelCase )
        lowerCAmelCase_ : Any = model(UpperCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    # Checks the LM head loss/logits shapes.
    def A ( self : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : int , ):
        lowerCAmelCase_ : Optional[Any] = FlaubertWithLMHeadModel(UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.eval()
        lowerCAmelCase_ : List[str] = model(UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    # Checks the simple (SQuAD-style) QA head output shapes.
    def A ( self : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] , ):
        lowerCAmelCase_ : Any = FlaubertForQuestionAnsweringSimple(UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.eval()
        lowerCAmelCase_ : Tuple = model(UpperCAmelCase )
        lowerCAmelCase_ : Any = model(UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    # Checks the beam-search QA head (start/end top-k, cls logits) shapes.
    def A ( self : Union[str, Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , ):
        lowerCAmelCase_ : Optional[Any] = FlaubertForQuestionAnswering(UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.eval()
        lowerCAmelCase_ : Union[str, Any] = model(UpperCAmelCase )
        lowerCAmelCase_ : List[Any] = model(
            UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , cls_index=UpperCAmelCase , is_impossible=UpperCAmelCase , p_mask=UpperCAmelCase , )
        lowerCAmelCase_ : Union[str, Any] = model(
            UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , cls_index=UpperCAmelCase , is_impossible=UpperCAmelCase , )
        ((lowerCAmelCase_) , ) : List[Any] = result_with_labels.to_tuple()
        lowerCAmelCase_ : Any = model(UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase )
        ((lowerCAmelCase_) , ) : Optional[int] = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape , () )
        self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(
            result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(
            result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )

    # Checks the sequence-classification head output shapes.
    def A ( self : int , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , ):
        lowerCAmelCase_ : List[str] = FlaubertForSequenceClassification(UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.eval()
        lowerCAmelCase_ : Any = model(UpperCAmelCase )
        lowerCAmelCase_ : List[Any] = model(UpperCAmelCase , labels=UpperCAmelCase )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    # Checks the token-classification head output shapes.
    def A ( self : Union[str, Any] , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : List[Any] , ):
        lowerCAmelCase_ : List[Any] = self.num_labels
        lowerCAmelCase_ : Any = FlaubertForTokenClassification(UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.eval()
        lowerCAmelCase_ : int = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    # Checks the multiple-choice head output shapes.
    def A ( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , ):
        lowerCAmelCase_ : List[str] = self.num_choices
        lowerCAmelCase_ : Any = FlaubertForMultipleChoice(config=UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.eval()
        lowerCAmelCase_ : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        lowerCAmelCase_ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        lowerCAmelCase_ : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        lowerCAmelCase_ : Optional[Any] = model(
            UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def A ( self : List[Any] ):
        # Splits prepare_config_and_inputs() into (config, inputs_dict) for the
        # common-test mixin.
        # NOTE(review): the tuple below unpacks every element into the same
        # mangled name, and an annotation on a tuple target is a SyntaxError;
        # the later dict also reads the lost names (`input_ids`, …).
        lowerCAmelCase_ : List[str] = self.prepare_config_and_inputs()
        (
            (
                lowerCAmelCase_
            ) , (
                lowerCAmelCase_
            ) , (
                lowerCAmelCase_
            ) , (
                lowerCAmelCase_
            ) , (
                lowerCAmelCase_
            ) , (
                lowerCAmelCase_
            ) , (
                lowerCAmelCase_
            ) , (
                lowerCAmelCase_
            ) , (
                lowerCAmelCase_
            ) ,
        ) : List[Any] = config_and_inputs
        lowerCAmelCase_ : Tuple = {
            """input_ids""": input_ids,
            """token_type_ids""": token_type_ids,
            """lengths""": input_lengths,
            """attention_mask""": input_mask,
        }
        return config, inputs_dict
@require_torch
class __a ( __UpperCamelCase ,__UpperCamelCase ,unittest.TestCase ):
    """Common model/pipeline tests for the Flaubert family, driven by the tester above.

    NOTE(review): this copy is machine-mangled — several methods take multiple
    parameters all named ``UpperCAmelCase`` (duplicate parameter names are a
    SyntaxError) and bodies read the lost names (``pipeline_test_casse_name``,
    ``model_class``, ``inputs_dict``, ``traced_model`` …). Restore the original
    identifiers before running; code is left byte-identical here.
    """

    # Model classes exercised by the common tests (empty when torch is absent).
    __snake_case : int = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task to model-class mapping for the pipeline test mixin.
    __snake_case : List[str] = (
        {
            """feature-extraction""": FlaubertModel,
            """fill-mask""": FlaubertWithLMHeadModel,
            """question-answering""": FlaubertForQuestionAnsweringSimple,
            """text-classification""": FlaubertForSequenceClassification,
            """token-classification""": FlaubertForTokenClassification,
            """zero-shot""": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    # Skip predicate for known-bad pipeline/tokenizer combinations.
    def A ( self : int , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : List[Any] , UpperCAmelCase : str ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("""Fast""" )
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    # Adds dummy cls_index/p_mask labels required by FlaubertForQuestionAnswering.
    def A ( self : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : int=False ):
        lowerCAmelCase_ : List[str] = super()._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                lowerCAmelCase_ : Any = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase )
                lowerCAmelCase_ : List[str] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase )
        return inputs_dict

    def A ( self : int ):
        # Wires up the model tester and the config tester.
        lowerCAmelCase_ : List[str] = FlaubertModelTester(self )
        lowerCAmelCase_ : List[Any] = ConfigTester(self , config_class=UpperCAmelCase , emb_dim=37 )

    def A ( self : Optional[int] ):
        self.config_tester.run_common_tests()

    def A ( self : Any ):
        lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*UpperCAmelCase )

    def A ( self : Optional[int] ):
        lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*UpperCAmelCase )

    def A ( self : Union[str, Any] ):
        lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*UpperCAmelCase )

    def A ( self : Tuple ):
        lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*UpperCAmelCase )

    def A ( self : Any ):
        lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*UpperCAmelCase )

    def A ( self : int ):
        lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*UpperCAmelCase )

    def A ( self : Optional[Any] ):
        lowerCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*UpperCAmelCase )

    @slow
    def A ( self : Optional[Any] ):
        # Smoke-tests loading of the first published checkpoint.
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCAmelCase_ : Optional[Any] = FlaubertModel.from_pretrained(UpperCAmelCase )
            self.assertIsNotNone(UpperCAmelCase )

    @slow
    @require_torch_gpu
    def A ( self : Dict ):
        # Traces each model with torch.jit and round-trips it through disk.
        lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return
            lowerCAmelCase_ : Any = True
            lowerCAmelCase_ : Tuple = model_class(config=UpperCAmelCase )
            lowerCAmelCase_ : Union[str, Any] = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
            lowerCAmelCase_ : Any = torch.jit.trace(
                UpperCAmelCase , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(UpperCAmelCase , os.path.join(UpperCAmelCase , """traced_model.pt""" ) )
                lowerCAmelCase_ : Union[str, Any] = torch.jit.load(os.path.join(UpperCAmelCase , """traced_model.pt""" ) , map_location=UpperCAmelCase )
                loaded(inputs_dict["""input_ids"""].to(UpperCAmelCase ) , inputs_dict["""attention_mask"""].to(UpperCAmelCase ) )
@require_torch
class __a ( unittest.TestCase ):
    """Integration test against the published flaubert/flaubert_base_cased checkpoint."""

    @slow
    def A ( self : Tuple ):
        # Bug fix: every local was rebound to `lowerCAmelCase_` while later
        # statements read the lost names (`model`, `UpperCAmelCase`, `output`),
        # raising NameError; coherent locals are restored.
        model = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" )
        input_ids = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 11, 7_68) )
        self.assertEqual(output.shape , expected_shape )
        # Reference activations from the released checkpoint.
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
| 600
|
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
__UpperCAmelCase = logging.get_logger(__name__)
enable_full_determinism()
class __a ( __UpperCamelCase ,__UpperCamelCase ,unittest.TestCase ):
    """Common-test harness for a tiny 3-channel UNet2DModel.

    NOTE(review): locals in `dummy_input` are machine-mangled — the sizes are
    bound to `lowerCAmelCase_` while later expressions read `batch_size`,
    `num_channels`, `sizes`, `noise`, `time_step`, and `.to(UpperCAmelCase )`
    reads an undefined name (presumably `torch_device`). Restore the original
    identifiers; code is left byte-identical here.
    """

    # Model class and the name of its main input, used by the common mixin.
    __snake_case : Any = UNetaDModel
    __snake_case : str = """sample"""

    @property
    def A ( self : List[Any] ):
        # Random (batch, channels, 32, 32) sample plus a timestep tensor.
        lowerCAmelCase_ : str = 4
        lowerCAmelCase_ : List[str] = 3
        lowerCAmelCase_ : Tuple = (32, 32)
        lowerCAmelCase_ : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase )
        lowerCAmelCase_ : Tuple = torch.tensor([10] ).to(UpperCAmelCase )
        return {"sample": noise, "timestep": time_step}

    @property
    def A ( self : List[str] ):
        # Expected input shape (C, H, W).
        return (3, 32, 32)

    @property
    def A ( self : List[Any] ):
        # Expected output shape (C, H, W).
        return (3, 32, 32)

    def A ( self : Optional[int] ):
        # Tiny-model constructor kwargs plus matching dummy inputs.
        lowerCAmelCase_ : Any = {
            """block_out_channels""": (32, 64),
            """down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""),
            """up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""),
            """attention_head_dim""": 3,
            """out_channels""": 3,
            """in_channels""": 3,
            """layers_per_block""": 2,
            """sample_size""": 32,
        }
        lowerCAmelCase_ : Dict = self.dummy_input
        return init_dict, inputs_dict
class __a ( __UpperCamelCase ,__UpperCamelCase ,unittest.TestCase ):
    """Common-test harness for a tiny 4-channel (LDM-style) UNet2DModel,
    plus checkpoint-loading and numerical regression tests.

    NOTE(review): this copy is machine-mangled — annotated tuple targets such
    as ``lowerCAmelCase_ , lowerCAmelCase_ : Dict = ...`` are a SyntaxError,
    and bodies read lost names (``model``, ``loading_info``, ``noise``,
    ``output`` …). Restore the original identifiers; code is left
    byte-identical here.
    """

    __snake_case : Dict = UNetaDModel
    __snake_case : List[str] = """sample"""

    @property
    def A ( self : Optional[Any] ):
        # Random (4, 4, 32, 32) sample plus a timestep tensor.
        lowerCAmelCase_ : List[Any] = 4
        lowerCAmelCase_ : Dict = 4
        lowerCAmelCase_ : Optional[Any] = (32, 32)
        lowerCAmelCase_ : Union[str, Any] = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase )
        lowerCAmelCase_ : List[str] = torch.tensor([10] ).to(UpperCAmelCase )
        return {"sample": noise, "timestep": time_step}

    @property
    def A ( self : Any ):
        return (4, 32, 32)

    @property
    def A ( self : str ):
        return (4, 32, 32)

    def A ( self : List[str] ):
        # Tiny-model constructor kwargs plus matching dummy inputs.
        lowerCAmelCase_ : str = {
            """sample_size""": 32,
            """in_channels""": 4,
            """out_channels""": 4,
            """layers_per_block""": 2,
            """block_out_channels""": (32, 64),
            """attention_head_dim""": 32,
            """down_block_types""": ("""DownBlock2D""", """DownBlock2D"""),
            """up_block_types""": ("""UpBlock2D""", """UpBlock2D"""),
        }
        lowerCAmelCase_ : Dict = self.dummy_input
        return init_dict, inputs_dict

    def A ( self : Optional[int] ):
        # Loading the dummy checkpoint should report no missing keys.
        lowerCAmelCase_ , lowerCAmelCase_ : Dict = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=UpperCAmelCase )
        self.assertIsNotNone(UpperCAmelCase )
        self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
        model.to(UpperCAmelCase )
        lowerCAmelCase_ : Optional[Any] = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
    def A ( self : List[Any] ):
        # GPU-only forward-pass smoke test.
        lowerCAmelCase_ , lowerCAmelCase_ : Any = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=UpperCAmelCase )
        model.to(UpperCAmelCase )
        lowerCAmelCase_ : Any = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
    def A ( self : List[str] ):
        # by defautl model loading will use accelerate as `low_cpu_mem_usage=True`
        lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=UpperCAmelCase )
        model_accelerate.to(UpperCAmelCase )
        model_accelerate.eval()
        lowerCAmelCase_ : int = torch.randn(
            1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
        lowerCAmelCase_ : Dict = noise.to(UpperCAmelCase )
        lowerCAmelCase_ : int = torch.tensor([10] * noise.shape[0] ).to(UpperCAmelCase )
        lowerCAmelCase_ : List[str] = model_accelerate(UpperCAmelCase , UpperCAmelCase )["""sample"""]
        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()
        lowerCAmelCase_ , lowerCAmelCase_ : int = UNetaDModel.from_pretrained(
            """fusing/unet-ldm-dummy-update""" , output_loading_info=UpperCAmelCase , low_cpu_mem_usage=UpperCAmelCase )
        model_normal_load.to(UpperCAmelCase )
        model_normal_load.eval()
        lowerCAmelCase_ : List[str] = model_normal_load(UpperCAmelCase , UpperCAmelCase )["""sample"""]
        assert torch_all_close(UpperCAmelCase , UpperCAmelCase , rtol=1e-3 )

    def A ( self : Tuple ):
        # Numerical regression against a hard-coded output slice.
        lowerCAmelCase_ : Optional[int] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" )
        model.eval()
        model.to(UpperCAmelCase )
        lowerCAmelCase_ : Tuple = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        lowerCAmelCase_ : List[str] = noise.to(UpperCAmelCase )
        lowerCAmelCase_ : List[Any] = torch.tensor([10] * noise.shape[0] ).to(UpperCAmelCase )
        with torch.no_grad():
            lowerCAmelCase_ : Tuple = model(UpperCAmelCase , UpperCAmelCase ).sample
        lowerCAmelCase_ : str = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        lowerCAmelCase_ : Any = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] )
        # fmt: on
        self.assertTrue(torch_all_close(UpperCAmelCase , UpperCAmelCase , rtol=1e-3 ) )
class __a ( __UpperCamelCase ,__UpperCamelCase ,unittest.TestCase ):
__snake_case : int = UNetaDModel
__snake_case : Any = """sample"""
@property
def A ( self : Optional[int] , UpperCAmelCase : Optional[int]=(32, 32) ):
lowerCAmelCase_ : Optional[Any] = 4
lowerCAmelCase_ : str = 3
lowerCAmelCase_ : Union[str, Any] = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase )
lowerCAmelCase_ : str = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=UpperCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def A ( self : Optional[int] ):
return (3, 32, 32)
@property
def A ( self : Tuple ):
return (3, 32, 32)
def A ( self : Tuple ):
lowerCAmelCase_ : str = {
"""block_out_channels""": [32, 64, 64, 64],
"""in_channels""": 3,
"""layers_per_block""": 1,
"""out_channels""": 3,
"""time_embedding_type""": """fourier""",
"""norm_eps""": 1e-6,
"""mid_block_scale_factor""": math.sqrt(2.0 ),
"""norm_num_groups""": None,
"""down_block_types""": [
"""SkipDownBlock2D""",
"""AttnSkipDownBlock2D""",
"""SkipDownBlock2D""",
"""SkipDownBlock2D""",
],
"""up_block_types""": [
"""SkipUpBlock2D""",
"""SkipUpBlock2D""",
"""AttnSkipUpBlock2D""",
"""SkipUpBlock2D""",
],
}
lowerCAmelCase_ : Any = self.dummy_input
return init_dict, inputs_dict
@slow
def A ( self : str ):
lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(UpperCAmelCase )
lowerCAmelCase_ : Any = self.dummy_input
lowerCAmelCase_ : Tuple = floats_tensor((4, 3) + (2_56, 2_56) ).to(UpperCAmelCase )
lowerCAmelCase_ : Dict = noise
lowerCAmelCase_ : str = model(**UpperCAmelCase )
assert image is not None, "Make sure output is not None"
@slow
def A ( self : Optional[int] ):
lowerCAmelCase_ : Dict = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" )
model.to(UpperCAmelCase )
lowerCAmelCase_ : Any = 4
lowerCAmelCase_ : int = 3
lowerCAmelCase_ : Union[str, Any] = (2_56, 2_56)
lowerCAmelCase_ : List[str] = torch.ones((batch_size, num_channels) + sizes ).to(UpperCAmelCase )
lowerCAmelCase_ : List[Any] = torch.tensor(batch_size * [1e-4] ).to(UpperCAmelCase )
with torch.no_grad():
lowerCAmelCase_ : Optional[Any] = model(UpperCAmelCase , UpperCAmelCase ).sample
lowerCAmelCase_ : Tuple = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
lowerCAmelCase_ : List[Any] = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -1_0980.7129, -2_0028.8535, 8148.2822, 2342.2905, 567.7608] )
# fmt: on
self.assertTrue(torch_all_close(UpperCAmelCase , UpperCAmelCase , rtol=1e-2 ) )
def A(self: List[Any]):
    """Check a known output slice of the dummy VE NCSN++ checkpoint
    (fusing/ncsnpp-ffhq-ve-dummy-update) for an all-ones 32x32 input.

    Restores the undefined locals the mangled original referenced.
    """
    model = UNetaDModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
    model.to(torch_device)
    batch_size = 4
    num_channels = 3
    sizes = (32, 32)
    noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
    time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)
    with torch.no_grad():
        output = model(noise, time_step).sample
    output_slice = output[0, -3:, -3:, -1].flatten().cpu()
    # fmt: off
    expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
    # fmt: on
    self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
def A ( self : int ):
"""Intentionally a no-op: per the original comment, this check is not
required for this model."""
# not required for this model
pass
| 600
| 1
|
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
# Weights replicated across Megatron tensor-parallel (TP) ranks; when TP
# shards are merged these must be *averaged* (summed, then divided by the
# number of ranks).  The converter below reads this name, so binding the list
# to a throwaway identifier (as the mangled original did) left it undefined.
WEIGHTS_TO_AVERAGE_ENDSWITH = [
    "word_embeddings_layernorm.weight",
    "word_embeddings_layernorm.bias",
    "input_layernorm.weight",
    "input_layernorm.bias",
    "post_attention_layernorm.weight",
    "post_attention_layernorm.bias",
    "self_attention.dense.bias",
    "mlp.dense_4h_to_h.bias",
    "ln_f.weight",
    "ln_f.bias",
]
# Weights stored as RowParallelLinear in Megatron-DeepSpeed: their TP shards
# are concatenated along dim 1 (every other weight concatenates along dim 0).
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
    "mlp.dense_4h_to_h.weight",
    "self_attention.dense.weight",
]
def __lowercase(key, file):
    """Map a Megatron-DeepSpeed parameter name onto the transformers BLOOM
    naming scheme.

    The mangled original declared two parameters with the same name (a
    SyntaxError) and referenced an undefined ``a_``; parameters are restored
    to ``key`` (tensor name) and ``file`` (shard file name, which carries the
    layer index).
    """
    # Embedding / final-layernorm weights map directly by name.
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }
    if key in layer_rename_map:
        return layer_rename_map[key]
    # Handle transformer blocks: the layer index comes from the shard file
    # name; the first three layers are not transformer blocks, hence -3.
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
def __lowercase(dtype):
    """Return the storage size in bytes of one element of a torch ``dtype``.

    ``torch.bool`` is counted as one bit (1/8 byte), matching the upstream
    conversion script.  Restores the parameter name the body actually reads.

    Raises:
        ValueError: if the dtype string carries no trailing bit width.
    """
    if dtype == torch.bool:
        return 1 / 8
    # The bit width is the trailing integer of the dtype's repr, e.g.
    # "torch.float16" -> 16.
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
def __lowercase(bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp):
    """Convert a Megatron-DeepSpeed BLOOM checkpoint to transformers format.

    Args:
        bloom_checkpoint_path: directory holding the Megatron shard files.
        bloom_config_file: optional BloomConfig json ("" -> default config).
        pytorch_dump_folder_path: output directory.
        shard_model: if True, write one .bin per layer file plus an index json
            instead of loading everything into a single BloomModel.
        pretraining_tp: number of tensor-parallel ranks used at training time.

    The mangled original declared five identical parameter names (a
    SyntaxError) and referenced dozens of undefined locals; names are restored
    from the visible call structure.  The two helpers are nested so this
    function is self-contained.
    """

    def _layer_name_mapping(key, file):
        # Map a Megatron parameter name onto transformers naming; the layer
        # index comes from the shard file name (first 3 layers are not blocks).
        layer_rename_map = {
            "word_embeddings.weight": "word_embeddings.weight",
            "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
            "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
            "weight": "ln_f.weight",
            "bias": "ln_f.bias",
        }
        if key in layer_rename_map:
            return layer_rename_map[key]
        layer_number = int(re.match(r".*layer_(\d*).*", file)[1]) - 3
        return f"h.{layer_number}." + key

    def _dtype_size(dtype):
        # Bytes per element of a torch dtype (bool counted as one bit).
        if dtype == torch.bool:
            return 1 / 8
        bit_search = re.search(r"[^\d](\d+)$", str(dtype))
        if bit_search is None:
            raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
        return int(bit_search.groups()[0]) // 8

    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))
        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0
        config = BloomConfig()
        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")
                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[_layer_name_mapping(key, file)] = temp.pop(key)
                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)
            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )
            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * _dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )
        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))
        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")
                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[_layer_name_mapping(key, file)] = temp.pop(key)
                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)
            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))
        assert not missing_keys, f"The keys {missing_keys} are missing"
        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bloom_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path to the Megatron-LM checkpoint path.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--bloom_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--shard_model",
        action="store_true",
        help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
    )
    parser.add_argument(
        "--pretraining_tp",
        default=4,
        type=int,
        help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
    )
    # Fix: the parse result was previously bound to a throwaway name while the
    # call below reads `args`.
    args = parser.parse_args()
    # NOTE(review): `convert_bloom_checkpoint_to_pytorch` is not defined under
    # that name anywhere in this file (the converter above lost its public
    # name) — confirm the intended entry point before running this script.
    convert_bloom_checkpoint_to_pytorch(
        args.bloom_checkpoint_path,
        args.bloom_config_file,
        args.pytorch_dump_folder_path,
        args.shard_model,
        args.pretraining_tp,
    )
| 713
|
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
# Module logger and the usage example; the mangled original bound both to the
# same throwaway name, leaving the example string unreachable by name.
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
        >>> from diffusers.utils import load_image
        >>> import torch

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        ... )
        >>> pipe_prior.to("cuda")

        >>> prompt = "A red cartoon frog, 4k"
        >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)

        >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        ... )
        >>> pipe.to("cuda")

        >>> init_image = load_image(
        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        ...     "/kandinsky/frog.png"
        ... )

        >>> image = pipe(
        ...     image=init_image,
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=100,
        ...     strength=0.2,
        ... ).images

        >>> image[0].save("red_frog.png")
        ```
"""
def __lowercase(height, width, scale_factor=8):
    """Round ``(height, width)`` up to the nearest multiple of
    ``scale_factor**2`` and return the corresponding latent-space size
    multiplied back by ``scale_factor``.

    The mangled original declared duplicate parameter names (a SyntaxError)
    and incremented ``new_height``/``new_width`` without ever binding them.
    """
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
def __lowercase(pil_image, w=512, h=512):
    """Resize a PIL image to ``(w, h)`` and convert it to a float CHW tensor
    in [-1, 1] with a leading batch dimension.

    Restores the undefined locals (``pil_image``, ``arr``) the mangled
    original referenced; ``np.floataa`` is the mangled spelling of float32.
    """
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    # Map [0, 255] -> [-1, 1].
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class snake_case_(DiffusionPipeline):
    """Image-to-image pipeline for the Kandinsky 2.2 decoder (unet + movq).

    NOTE(review): the mangled original subclassed an undefined ``_a`` and
    named every helper method ``A__`` while the body calls
    ``self.get_timesteps`` / ``self.prepare_latents`` / ``self._execution_device``;
    names are restored to what the code itself invokes, and the module-level
    image helpers it called by undefined names are inlined as static methods
    so the class is self-contained.
    """

    def __init__(self, unet, scheduler, movq):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        # Spatial down-scaling factor of the MoVQ autoencoder.
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    @staticmethod
    def _downscale_height_and_width(height, width, scale_factor=8):
        # Round the requested size up so it is a multiple of scale_factor**2,
        # then express it in latent space (x scale_factor).
        new_height = height // scale_factor**2
        if height % scale_factor**2 != 0:
            new_height += 1
        new_width = width // scale_factor**2
        if width % scale_factor**2 != 0:
            new_width += 1
        return new_height * scale_factor, new_width * scale_factor

    @staticmethod
    def _prepare_image(pil_image, w=512, h=512):
        # PIL image -> [-1, 1] float CHW tensor with a leading batch dim.
        pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
        arr = np.array(pil_image.convert("RGB")).astype(np.float32) / 127.5 - 1
        arr = np.transpose(arr, [2, 0, 1])
        return torch.from_numpy(arr).unsqueeze(0)

    def get_timesteps(self, num_inference_steps, strength, device):
        """Trim the scheduler timesteps according to img2img ``strength``."""
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        """Encode ``image`` with MoVQ (unless already latent) and noise it to ``timestep``."""
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}")
        image = image.to(device=device, dtype=dtype)
        batch_size = batch_size * num_images_per_prompt
        if image.shape[1] == 4:
            # Four channels: already a latent tensor.
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators.")
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)
            init_latents = self.movq.config.scaling_factor * init_latents
        init_latents = torch.cat([init_latents], dim=0)
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # get latents: noise the clean latents to the starting timestep
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        return init_latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        """Offload unet/movq to CPU, moving each to GPU only while it runs."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`')
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        """Whole-model CPU offload (faster than sequential, more memory)."""
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.')
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            # presumably silence_dtype_warnings=False upstream — confirm
            self.to("cpu", silence_dtype_warnings=False)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    def __call__(
        self,
        image_embeds,
        image,
        negative_image_embeds,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        strength=0.3,
        num_images_per_prompt=1,
        generator=None,
        output_type="pil",
        return_dict=True,
    ):
        """Run the decoder img2img loop; see the module example docstring.

        NOTE(review): parameter names were lost in obfuscation; restored from
        the body's usage order — confirm against upstream before relying on
        keyword calls.  The original ``@replace_example_docstring`` decorator
        referenced an undefined name (NameError at import) and is removed; it
        only rewrote this docstring.
        """
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device)
        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor")
        image = torch.cat([self._prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)
        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = self._downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 102
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class snake_case__(PipelineTesterMixin, unittest.TestCase):
    """Fast (CPU, dummy-weight) tests for DDIMPipeline.

    NOTE(review): the mangled original subclassed an undefined name and bound
    every class attribute/method to throwaway identifiers, while the body and
    the mixin read ``pipeline_class``, ``self.get_dummy_components`` etc.;
    names are restored to what the code itself consumes.  unittest only runs
    methods named ``test_*``.
    """

    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # NOTE(review): the original attribute name for this False flag was lost
    # in obfuscation (presumably a mixin feature toggle) — confirm upstream.
    test_cpu_offload = False

    def get_dummy_components(self):
        """Tiny UNet + DDIM scheduler with a fixed seed."""
        torch.manual_seed(0)
        unet = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        return {"unet": unet, "scheduler": scheduler}

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline kwargs for ``device``."""
        if str(device).startswith("mps"):
            # mps does not support device-bound generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        return {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class snake_case__(unittest.TestCase):
    """Slow GPU integration tests for DDIM on pretrained checkpoints.

    NOTE(review): method names are restored to ``test_*`` form (unittest
    discovery) and the undefined locals of the mangled original are rebound.
    """

    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"
        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)
        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 638
|
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
lowercase__ = logging.get_logger(__name__)
enable_full_determinism()
class snake_case__(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    """Fast tests for a small 32x32 UNet2DModel.

    NOTE(review): the mangled original subclassed two undefined names (the
    imported ModelTesterMixin / UNetTesterMixin are restored) and bound the
    properties the mixin reads (``dummy_input`` etc.) to throwaway names.
    """

    model_class = UNetaDModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)
        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class snake_case__(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    """Tests for the 4-channel LDM UNet2DModel (fusing/unet-ldm-dummy-update).

    NOTE(review): fixes the annotated tuple-unpack SyntaxErrors of the
    mangled original and restores the undefined locals / mixin-facing names.
    Boolean kwargs lost in obfuscation (``output_loading_info``,
    ``low_cpu_mem_usage``) are restored from the body's own usage — confirm
    against upstream.
    """

    model_class = UNetaDModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)
        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)
        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = UNetaDModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        image = model(**self.dummy_input).sample
        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNetaDModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample
        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        model_accelerate, _ = UNetaDModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()
        noise = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)
        arr_accelerate = model_accelerate(noise, time_step)["sample"]
        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()
        model_normal_load, _ = UNetaDModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False)
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]
        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)

    def test_output_pretrained(self):
        model = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)
        noise = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)
        with torch.no_grad():
            output = model(noise, time_step).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))
class snake_case__(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    """Tests for the NCSN++ (score-based VE) UNet2DModel.

    NOTE(review): fixes the annotated tuple-unpack SyntaxError of the mangled
    original and restores the undefined locals / mixin-facing names
    (``dummy_input``, ``model``, ``loading_info`` etc.).
    """

    model_class = UNetaDModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3
        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        # VE models take a continuous (float) time step per batch element.
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.intaa, device=torch_device)
        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNetaDModel.from_pretrained(
            "google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)
        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)
        batch_size = 4
        num_channels = 3
        sizes = (256, 256)
        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)
        with torch.no_grad():
            output = model(noise, time_step).sample
        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_output_pretrained_ve_large(self):
        model = UNetaDModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)
        with torch.no_grad():
            output = model(noise, time_step).sample
        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
| 638
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase = {
"""configuration_informer""": [
"""INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = [
"""INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InformerForPrediction""",
"""InformerModel""",
"""InformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 706
|
"""simple docstring"""
def a__ ( head ):
    """Return True if the singly linked list starting at ``head`` is a palindrome.

    O(n) time, O(1) extra space: find the midpoint with fast/slow pointers, reverse the
    second half in place, then compare the two halves front-to-front.

    Fixes: locals were all assigned to a single throwaway name while the code read
    ``fast``/``slow``/``second``/``node``/``nxt``, and the parameter was never named
    ``head`` even though the body used it (NameError).
    """
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Cut the first half off so the comparison stops at the midpoint.
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def a__ ( head ):
    """Return True if the singly linked list starting at ``head`` is a palindrome.

    Stack-based variant: find the midpoint, push the second half's values on a stack,
    then pop while walking from the front. O(n) time, O(n) space; list is not modified.

    Fixes: locals were all assigned to a single throwaway name while the code read
    ``fast``/``slow``/``cur``/``stack``, and the parameter was never named ``head``
    even though the body used it (NameError).
    """
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    fast = slow = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val )
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def a__ ( head ):
    """Return True if the singly linked list starting at ``head`` is a palindrome.

    Dictionary variant: record the positions of each value; in a palindrome of length n,
    matching occurrences of a value must pair up so their positions sum to n - 1, and at
    most one value may appear an odd number of times (the middle element).

    Fixes: locals were all assigned to a single throwaway name (NameError on ``d``,
    ``pos``, ``checksum``, ``middle``, ``step``), and the position list appended the head
    node itself instead of the integer position ``pos``.
    """
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos )
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1  # Positions i and (n-1-i) must pair up in a palindrome.
    middle = 0
    for v in d.values():
        if len(v ) % 2 != 0:
            # Odd occurrence count: candidate for the single middle element.
            middle += 1
        else:
            step = 0
            for i in range(0 , len(v ) ):
                if v[i] + v[len(v ) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
| 14
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.